comment stringlengths 1 45k | method_body stringlengths 23 281k | target_code stringlengths 0 5.16k | method_body_after stringlengths 12 281k | context_before stringlengths 8 543k | context_after stringlengths 8 543k |
|---|---|---|---|---|---|
Found this a bit confusing as type does not indicate whether order matters. I suggest using same type as the collector. `toSet` -> `Set`, `toList` -> `List`. | private void triggerReadyJobs(LockedApplication application) {
List<Triggering> triggerings = new ArrayList<>();
Change change = application.change();
List<DeploymentSpec.Step> steps = application.deploymentSpec().steps();
if (steps.isEmpty()) steps = Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test));
Optional<Instant> completedAt = Optional.of(clock.instant());
String reason = "Deploying " + change.toString();
for (DeploymentSpec.Step step : steps) {
LockedApplication app = application;
Collection<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Collection<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toList());
if (remainingJobs.isEmpty()) {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = stepJobs.stream().map(job -> completedAt(app, job).get()).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
else if (completedAt.isPresent()) {
for (JobType job : remainingJobs)
triggerings.add(new Triggering(app, job, reason, stepJobs));
completedAt = Optional.empty();
}
}
if (completedAt.isPresent())
application = application.withChange(Change.empty());
for (Triggering triggering : triggerings)
if (application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), change) && allowedToTriggerNow(triggering, application))
application = trigger(triggering, application);
applications().store(application);
} | Collection<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toList()); | private void triggerReadyJobs(LockedApplication application) {
List<Triggering> triggerings = new ArrayList<>();
Change change = application.change();
List<DeploymentSpec.Step> steps = application.deploymentSpec().steps();
if (steps.isEmpty()) steps = Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test));
Optional<Instant> completedAt = Optional.of(clock.instant());
String reason = "Deploying " + change.toString();
for (DeploymentSpec.Step step : steps) {
LockedApplication app = application;
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Set<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toSet());
if (remainingJobs.isEmpty()) {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = stepJobs.stream().map(job -> completedAt(app, job).get()).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
else if (completedAt.isPresent()) {
for (JobType job : remainingJobs)
triggerings.add(new Triggering(app, job, reason, stepJobs));
completedAt = Optional.empty();
}
}
if (completedAt.isPresent())
application = application.withChange(Change.empty());
for (Triggering triggering : triggerings)
if (application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), change) && allowedToTriggerNow(triggering, application))
application = trigger(triggering, application);
applications().store(application);
} | class DeploymentTrigger {
/**
* The max duration a job may run before we consider it dead/hanging
*/
private final Duration jobTimeout;
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentQueue deploymentQueue;
private final DeploymentOrder order;
public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.deploymentQueue = new DeploymentQueue(controller, curator);
this.order = new DeploymentOrder(controller);
this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
}
/**
* Returns the time in the past before which jobs are at this moment considered unresponsive
*/
public Instant jobTimeoutLimit() {
return clock.instant().minus(jobTimeout);
}
public DeploymentQueue deploymentQueue() {
return deploymentQueue;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*
* @param report information about the job that just completed
*/
public void triggerFromCompletion(JobReport report) {
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(report.projectId());
if (report.jobType() == JobType.component && report.success()) {
if ( ! acceptNewApplicationVersionNow(application))
application = application.withOutstandingChange(Change.of(applicationVersion));
else
application = application.withChange(application.change().with(applicationVersion));
}
applications().store(application);
});
}
/**
* Find jobs that can and should run but are currently not.
*/
public void triggerReadyJobs() {
ApplicationList applications = ApplicationList.from(applications().asList());
applications = applications.notPullRequest()
.withProjectId()
.deploying();
for (Application application : applications.asList())
applications().lockIfPresent(application.id(), this::triggerReadyJobs);
}
/**
* Trigger a job for an application, if allowed
*
* @param triggering the triggering to execute, i.e., application, job type and reason
* @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
*/
public LockedApplication trigger(Triggering triggering, LockedApplication application) {
if ( ! application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), application.change())) {
log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType,
application, triggering.reason));
return application;
}
log.info(triggering.toString());
deploymentQueue.addJob(application.id(), triggering.jobType, triggering.retry);
return application.withJobTriggering(triggering.jobType,
clock.instant(),
application.deployVersionFor(triggering.jobType, controller),
application.deployApplicationVersionFor(triggering.jobType, controller, false)
.orElse(ApplicationVersion.unknown),
triggering.reason);
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already have an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && !application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/**
* Cancels any ongoing upgrade of the given application
*
* @param applicationId the application to trigger
*/
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
if ( ! applications().require(applicationId).change().isPresent())
deploymentQueue.removeJobs(application.id());
});
}
/**
* Finds the next step to trigger for the given application, if any, and triggers it
*/
private Optional<Instant> completedAt(Application application, JobType jobType) {
return jobType.isProduction()
? changeCompletedAt(application, jobType)
: application.deploymentJobs().successAt(application.change(), jobType);
}
private boolean allowedToTriggerNow(Triggering triggering, Application application) {
if (application.deploymentJobs().isRunning(triggering.jobType, jobTimeoutLimit()))
return false;
if ( ! triggering.jobType.isProduction())
return true;
if ( ! triggering.concurrentlyWith.containsAll(JobList.from(application)
.production()
.running(jobTimeoutLimit())
.mapToList(JobStatus::type)))
return false;
if (application.change().blockedBy(application.deploymentSpec(), clock.instant()))
return false;
return true;
}
private ApplicationController applications() {
return controller.applications();
}
/** Returns the instant when the given application's current change was completed for the given job. */
private Optional<Instant> changeCompletedAt(Application application, JobType job) {
if ( ! job.isProduction())
throw new IllegalArgumentException(job + " is not a production job!");
Deployment deployment = application.deployments().get(job.zone(controller.system()).get());
if (deployment == null)
return Optional.empty();
int applicationComparison = application.change().application()
.map(version -> version.compareTo(deployment.applicationVersion()))
.orElse(0);
int platformComparison = application.change().platform()
.map(version -> version.compareTo(deployment.version()))
.orElse(0);
if (applicationComparison == -1 || platformComparison == -1)
return Optional.of(deployment.at());
return applicationComparison == 0 && platformComparison == 0 ? Optional.of(deployment.at()) : Optional.empty();
}
private boolean acceptNewApplicationVersionNow(LockedApplication application) {
if ( ! application.change().isPresent()) return true;
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
if ( ! application.deploymentSpec().canUpgradeAt(clock.instant())
|| ! application.deploymentSpec().canChangeRevisionAt(clock.instant()))
return true;
return false;
}
public static class Triggering {
private final LockedApplication application;
private final JobType jobType;
private final boolean retry;
private final String reason;
private final Collection<JobType> concurrentlyWith;
public Triggering(LockedApplication application, JobType jobType, String reason, Collection<JobType> concurrentlyWith) {
this.application = application;
this.jobType = jobType;
this.concurrentlyWith = concurrentlyWith;
JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent();
this.reason = retry ? "Retrying on out of capacity" : reason;
}
public Triggering(LockedApplication application, JobType jobType, String reason) {
this(application, jobType, reason, Collections.emptySet());
}
public String toString() {
return String.format("Triggering %s for %s, deploying %s: %s", jobType, application, application.change(), reason);
}
}
} | class DeploymentTrigger {
/**
* The max duration a job may run before we consider it dead/hanging
*/
private final Duration jobTimeout;
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentQueue deploymentQueue;
private final DeploymentOrder order;
public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.deploymentQueue = new DeploymentQueue(controller, curator);
this.order = new DeploymentOrder(controller::system);
this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
}
/**
* Returns the time in the past before which jobs are at this moment considered unresponsive
*/
public Instant jobTimeoutLimit() {
return clock.instant().minus(jobTimeout);
}
public DeploymentQueue deploymentQueue() {
return deploymentQueue;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*
* @param report information about the job that just completed
*/
public void triggerFromCompletion(JobReport report) {
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(report.projectId());
if (report.jobType() == JobType.component && report.success()) {
if ( ! acceptNewApplicationVersionNow(application))
application = application.withOutstandingChange(Change.of(applicationVersion));
else
application = application.withChange(application.change().with(applicationVersion));
}
applications().store(application);
});
}
/**
* Find jobs that can and should run but are currently not.
*/
public void triggerReadyJobs() {
ApplicationList applications = ApplicationList.from(applications().asList());
applications = applications.notPullRequest()
.withProjectId()
.deploying();
for (Application application : applications.asList())
applications().lockIfPresent(application.id(), this::triggerReadyJobs);
}
/**
* Trigger a job for an application, if allowed
*
* @param triggering the triggering to execute, i.e., application, job type and reason
* @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
*/
public LockedApplication trigger(Triggering triggering, LockedApplication application) {
if ( ! application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), application.change())) {
log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType,
application, triggering.reason));
return application;
}
log.info(triggering.toString());
deploymentQueue.addJob(application.id(), triggering.jobType, triggering.retry);
return application.withJobTriggering(triggering.jobType,
clock.instant(),
application.deployVersionFor(triggering.jobType, controller),
application.deployApplicationVersionFor(triggering.jobType, controller, false)
.orElse(ApplicationVersion.unknown),
triggering.reason);
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already have an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && !application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/**
* Cancels any ongoing upgrade of the given application
*
* @param applicationId the application to trigger
*/
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
if ( ! applications().require(applicationId).change().isPresent())
deploymentQueue.removeJobs(application.id());
});
}
/**
* Finds the next step to trigger for the given application, if any, and triggers it
*/
private Optional<Instant> completedAt(Application application, JobType jobType) {
return jobType.isProduction()
? changeCompletedAt(application, jobType)
: application.deploymentJobs().successAt(application.change(), jobType);
}
private boolean allowedToTriggerNow(Triggering triggering, Application application) {
if (application.deploymentJobs().isRunning(triggering.jobType, jobTimeoutLimit()))
return false;
if ( ! triggering.jobType.isProduction())
return true;
if ( ! triggering.concurrentlyWith.containsAll(JobList.from(application)
.production()
.running(jobTimeoutLimit())
.mapToList(JobStatus::type)))
return false;
if (application.change().blockedBy(application.deploymentSpec(), clock.instant()))
return false;
return true;
}
private ApplicationController applications() {
return controller.applications();
}
/** Returns the instant when the given application's current change was completed for the given job. */
private Optional<Instant> changeCompletedAt(Application application, JobType job) {
if ( ! job.isProduction())
throw new IllegalArgumentException(job + " is not a production job!");
Deployment deployment = application.deployments().get(job.zone(controller.system()).get());
if (deployment == null)
return Optional.empty();
int applicationComparison = application.change().application()
.map(version -> version.compareTo(deployment.applicationVersion()))
.orElse(0);
int platformComparison = application.change().platform()
.map(version -> version.compareTo(deployment.version()))
.orElse(0);
return Optional.of(deployment.at())
.filter(ignored -> applicationComparison == -1 || platformComparison == -1
|| (applicationComparison == 0 && platformComparison == 0));
}
private boolean acceptNewApplicationVersionNow(LockedApplication application) {
if ( ! application.change().isPresent()) return true;
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
if ( ! application.deploymentSpec().canUpgradeAt(clock.instant())
|| ! application.deploymentSpec().canChangeRevisionAt(clock.instant()))
return true;
return false;
}
public static class Triggering {
private final LockedApplication application;
private final JobType jobType;
private final boolean retry;
private final String reason;
private final Collection<JobType> concurrentlyWith;
public Triggering(LockedApplication application, JobType jobType, String reason, Collection<JobType> concurrentlyWith) {
this.application = application;
this.jobType = jobType;
this.concurrentlyWith = concurrentlyWith;
JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent();
this.reason = retry ? "Retrying on out of capacity" : reason;
}
public Triggering(LockedApplication application, JobType jobType, String reason) {
this(application, jobType, reason, Collections.emptySet());
}
public String toString() {
return String.format("Triggering %s for %s, deploying %s: %s", jobType, application, application.change(), reason);
}
}
} |
Last couple of lines can be simplified to: ``` Optional.of(deployment.at()).filter(ignored -> { applicationComparison == -1 || platformComparion == -1 || (applicationComparison == 0 && platformComparion == 0) }); ``` | private Optional<Instant> changeCompletedAt(Application application, JobType job) {
if ( ! job.isProduction())
throw new IllegalArgumentException(job + " is not a production job!");
Deployment deployment = application.deployments().get(job.zone(controller.system()).get());
if (deployment == null)
return Optional.empty();
int applicationComparison = application.change().application()
.map(version -> version.compareTo(deployment.applicationVersion()))
.orElse(0);
int platformComparison = application.change().platform()
.map(version -> version.compareTo(deployment.version()))
.orElse(0);
if (applicationComparison == -1 || platformComparison == -1)
return Optional.of(deployment.at());
return applicationComparison == 0 && platformComparison == 0 ? Optional.of(deployment.at()) : Optional.empty();
} | return applicationComparison == 0 && platformComparison == 0 ? Optional.of(deployment.at()) : Optional.empty(); | private Optional<Instant> changeCompletedAt(Application application, JobType job) {
if ( ! job.isProduction())
throw new IllegalArgumentException(job + " is not a production job!");
Deployment deployment = application.deployments().get(job.zone(controller.system()).get());
if (deployment == null)
return Optional.empty();
int applicationComparison = application.change().application()
.map(version -> version.compareTo(deployment.applicationVersion()))
.orElse(0);
int platformComparison = application.change().platform()
.map(version -> version.compareTo(deployment.version()))
.orElse(0);
return Optional.of(deployment.at())
.filter(ignored -> applicationComparison == -1 || platformComparison == -1
|| (applicationComparison == 0 && platformComparison == 0));
} | class DeploymentTrigger {
/**
* The max duration a job may run before we consider it dead/hanging
*/
private final Duration jobTimeout;
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentQueue deploymentQueue;
private final DeploymentOrder order;
public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.deploymentQueue = new DeploymentQueue(controller, curator);
this.order = new DeploymentOrder(controller);
this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
}
/**
* Returns the time in the past before which jobs are at this moment considered unresponsive
*/
public Instant jobTimeoutLimit() {
return clock.instant().minus(jobTimeout);
}
public DeploymentQueue deploymentQueue() {
return deploymentQueue;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*
* @param report information about the job that just completed
*/
public void triggerFromCompletion(JobReport report) {
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(report.projectId());
if (report.jobType() == JobType.component && report.success()) {
if ( ! acceptNewApplicationVersionNow(application))
application = application.withOutstandingChange(Change.of(applicationVersion));
else
application = application.withChange(application.change().with(applicationVersion));
}
applications().store(application);
});
}
/**
* Find jobs that can and should run but are currently not.
*/
public void triggerReadyJobs() {
ApplicationList applications = ApplicationList.from(applications().asList());
applications = applications.notPullRequest()
.withProjectId()
.deploying();
for (Application application : applications.asList())
applications().lockIfPresent(application.id(), this::triggerReadyJobs);
}
/**
* Trigger a job for an application, if allowed
*
* @param triggering the triggering to execute, i.e., application, job type and reason
* @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
*/
public LockedApplication trigger(Triggering triggering, LockedApplication application) {
if ( ! application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), application.change())) {
log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType,
application, triggering.reason));
return application;
}
log.info(triggering.toString());
deploymentQueue.addJob(application.id(), triggering.jobType, triggering.retry);
return application.withJobTriggering(triggering.jobType,
clock.instant(),
application.deployVersionFor(triggering.jobType, controller),
application.deployApplicationVersionFor(triggering.jobType, controller, false)
.orElse(ApplicationVersion.unknown),
triggering.reason);
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already have an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && !application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/**
* Cancels any ongoing upgrade of the given application
*
* @param applicationId the application to trigger
*/
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
if ( ! applications().require(applicationId).change().isPresent())
deploymentQueue.removeJobs(application.id());
});
}
/**
* Finds the next step to trigger for the given application, if any, and triggers it
*/
private void triggerReadyJobs(LockedApplication application) {
List<Triggering> triggerings = new ArrayList<>();
Change change = application.change();
List<DeploymentSpec.Step> steps = application.deploymentSpec().steps();
if (steps.isEmpty()) steps = Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test));
Optional<Instant> completedAt = Optional.of(clock.instant());
String reason = "Deploying " + change.toString();
for (DeploymentSpec.Step step : steps) {
LockedApplication app = application;
Collection<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Collection<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toList());
if (remainingJobs.isEmpty()) {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = stepJobs.stream().map(job -> completedAt(app, job).get()).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
else if (completedAt.isPresent()) {
for (JobType job : remainingJobs)
triggerings.add(new Triggering(app, job, reason, stepJobs));
completedAt = Optional.empty();
}
}
if (completedAt.isPresent())
application = application.withChange(Change.empty());
for (Triggering triggering : triggerings)
if (application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), change) && allowedToTriggerNow(triggering, application))
application = trigger(triggering, application);
applications().store(application);
}
private Optional<Instant> completedAt(Application application, JobType jobType) {
return jobType.isProduction()
? changeCompletedAt(application, jobType)
: application.deploymentJobs().successAt(application.change(), jobType);
}
private boolean allowedToTriggerNow(Triggering triggering, Application application) {
if (application.deploymentJobs().isRunning(triggering.jobType, jobTimeoutLimit()))
return false;
if ( ! triggering.jobType.isProduction())
return true;
if ( ! triggering.concurrentlyWith.containsAll(JobList.from(application)
.production()
.running(jobTimeoutLimit())
.mapToList(JobStatus::type)))
return false;
if (application.change().blockedBy(application.deploymentSpec(), clock.instant()))
return false;
return true;
}
private ApplicationController applications() {
return controller.applications();
}
/** Returns the instant when the given application's current change was completed for the given job. */
private boolean acceptNewApplicationVersionNow(LockedApplication application) {
if ( ! application.change().isPresent()) return true;
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
if ( ! application.deploymentSpec().canUpgradeAt(clock.instant())
|| ! application.deploymentSpec().canChangeRevisionAt(clock.instant()))
return true;
return false;
}
public static class Triggering {
private final LockedApplication application;
private final JobType jobType;
private final boolean retry;
private final String reason;
private final Collection<JobType> concurrentlyWith;
public Triggering(LockedApplication application, JobType jobType, String reason, Collection<JobType> concurrentlyWith) {
this.application = application;
this.jobType = jobType;
this.concurrentlyWith = concurrentlyWith;
JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent();
this.reason = retry ? "Retrying on out of capacity" : reason;
}
public Triggering(LockedApplication application, JobType jobType, String reason) {
this(application, jobType, reason, Collections.emptySet());
}
public String toString() {
return String.format("Triggering %s for %s, deploying %s: %s", jobType, application, application.change(), reason);
}
}
} | class DeploymentTrigger {
/**
* The max duration a job may run before we consider it dead/hanging
*/
private final Duration jobTimeout;
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentQueue deploymentQueue;
private final DeploymentOrder order;
public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.deploymentQueue = new DeploymentQueue(controller, curator);
this.order = new DeploymentOrder(controller::system);
this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
}
/**
* Returns the time in the past before which jobs are at this moment considered unresponsive
*/
public Instant jobTimeoutLimit() {
return clock.instant().minus(jobTimeout);
}
public DeploymentQueue deploymentQueue() {
return deploymentQueue;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*
* @param report information about the job that just completed
*/
public void triggerFromCompletion(JobReport report) {
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(report.projectId());
if (report.jobType() == JobType.component && report.success()) {
if ( ! acceptNewApplicationVersionNow(application))
application = application.withOutstandingChange(Change.of(applicationVersion));
else
application = application.withChange(application.change().with(applicationVersion));
}
applications().store(application);
});
}
/**
* Find jobs that can and should run but are currently not.
*/
public void triggerReadyJobs() {
ApplicationList applications = ApplicationList.from(applications().asList());
applications = applications.notPullRequest()
.withProjectId()
.deploying();
for (Application application : applications.asList())
applications().lockIfPresent(application.id(), this::triggerReadyJobs);
}
/**
* Trigger a job for an application, if allowed
*
* @param triggering the triggering to execute, i.e., application, job type and reason
* @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
*/
public LockedApplication trigger(Triggering triggering, LockedApplication application) {
if ( ! application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), application.change())) {
log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType,
application, triggering.reason));
return application;
}
log.info(triggering.toString());
deploymentQueue.addJob(application.id(), triggering.jobType, triggering.retry);
return application.withJobTriggering(triggering.jobType,
clock.instant(),
application.deployVersionFor(triggering.jobType, controller),
application.deployApplicationVersionFor(triggering.jobType, controller, false)
.orElse(ApplicationVersion.unknown),
triggering.reason);
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already have an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && !application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/**
* Cancels any ongoing upgrade of the given application
*
* @param applicationId the application to trigger
*/
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
if ( ! applications().require(applicationId).change().isPresent())
deploymentQueue.removeJobs(application.id());
});
}
/**
* Finds the next step to trigger for the given application, if any, and triggers it
*/
private void triggerReadyJobs(LockedApplication application) {
List<Triggering> triggerings = new ArrayList<>();
Change change = application.change();
List<DeploymentSpec.Step> steps = application.deploymentSpec().steps();
if (steps.isEmpty()) steps = Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test));
Optional<Instant> completedAt = Optional.of(clock.instant());
String reason = "Deploying " + change.toString();
for (DeploymentSpec.Step step : steps) {
LockedApplication app = application;
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Set<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toSet());
if (remainingJobs.isEmpty()) {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = stepJobs.stream().map(job -> completedAt(app, job).get()).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
else if (completedAt.isPresent()) {
for (JobType job : remainingJobs)
triggerings.add(new Triggering(app, job, reason, stepJobs));
completedAt = Optional.empty();
}
}
if (completedAt.isPresent())
application = application.withChange(Change.empty());
for (Triggering triggering : triggerings)
if (application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), change) && allowedToTriggerNow(triggering, application))
application = trigger(triggering, application);
applications().store(application);
}
private Optional<Instant> completedAt(Application application, JobType jobType) {
return jobType.isProduction()
? changeCompletedAt(application, jobType)
: application.deploymentJobs().successAt(application.change(), jobType);
}
private boolean allowedToTriggerNow(Triggering triggering, Application application) {
if (application.deploymentJobs().isRunning(triggering.jobType, jobTimeoutLimit()))
return false;
if ( ! triggering.jobType.isProduction())
return true;
if ( ! triggering.concurrentlyWith.containsAll(JobList.from(application)
.production()
.running(jobTimeoutLimit())
.mapToList(JobStatus::type)))
return false;
if (application.change().blockedBy(application.deploymentSpec(), clock.instant()))
return false;
return true;
}
private ApplicationController applications() {
return controller.applications();
}
/** Returns the instant when the given application's current change was completed for the given job. */
private boolean acceptNewApplicationVersionNow(LockedApplication application) {
if ( ! application.change().isPresent()) return true;
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
if ( ! application.deploymentSpec().canUpgradeAt(clock.instant())
|| ! application.deploymentSpec().canChangeRevisionAt(clock.instant()))
return true;
return false;
}
public static class Triggering {
private final LockedApplication application;
private final JobType jobType;
private final boolean retry;
private final String reason;
private final Collection<JobType> concurrentlyWith;
public Triggering(LockedApplication application, JobType jobType, String reason, Collection<JobType> concurrentlyWith) {
this.application = application;
this.jobType = jobType;
this.concurrentlyWith = concurrentlyWith;
JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent();
this.reason = retry ? "Retrying on out of capacity" : reason;
}
public Triggering(LockedApplication application, JobType jobType, String reason) {
this(application, jobType, reason, Collections.emptySet());
}
public String toString() {
return String.format("Triggering %s for %s, deploying %s: %s", jobType, application, application.change(), reason);
}
}
} |
Sure! | private void triggerReadyJobs(LockedApplication application) {
List<Triggering> triggerings = new ArrayList<>();
Change change = application.change();
List<DeploymentSpec.Step> steps = application.deploymentSpec().steps();
if (steps.isEmpty()) steps = Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test));
Optional<Instant> completedAt = Optional.of(clock.instant());
String reason = "Deploying " + change.toString();
for (DeploymentSpec.Step step : steps) {
LockedApplication app = application;
Collection<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Collection<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toList());
if (remainingJobs.isEmpty()) {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = stepJobs.stream().map(job -> completedAt(app, job).get()).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
else if (completedAt.isPresent()) {
for (JobType job : remainingJobs)
triggerings.add(new Triggering(app, job, reason, stepJobs));
completedAt = Optional.empty();
}
}
if (completedAt.isPresent())
application = application.withChange(Change.empty());
for (Triggering triggering : triggerings)
if (application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), change) && allowedToTriggerNow(triggering, application))
application = trigger(triggering, application);
applications().store(application);
} | Collection<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toList()); | private void triggerReadyJobs(LockedApplication application) {
List<Triggering> triggerings = new ArrayList<>();
Change change = application.change();
List<DeploymentSpec.Step> steps = application.deploymentSpec().steps();
if (steps.isEmpty()) steps = Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test));
Optional<Instant> completedAt = Optional.of(clock.instant());
String reason = "Deploying " + change.toString();
for (DeploymentSpec.Step step : steps) {
LockedApplication app = application;
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Set<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toSet());
if (remainingJobs.isEmpty()) {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = stepJobs.stream().map(job -> completedAt(app, job).get()).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
else if (completedAt.isPresent()) {
for (JobType job : remainingJobs)
triggerings.add(new Triggering(app, job, reason, stepJobs));
completedAt = Optional.empty();
}
}
if (completedAt.isPresent())
application = application.withChange(Change.empty());
for (Triggering triggering : triggerings)
if (application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), change) && allowedToTriggerNow(triggering, application))
application = trigger(triggering, application);
applications().store(application);
} | class DeploymentTrigger {
/**
* The max duration a job may run before we consider it dead/hanging
*/
private final Duration jobTimeout;
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentQueue deploymentQueue;
private final DeploymentOrder order;
public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.deploymentQueue = new DeploymentQueue(controller, curator);
this.order = new DeploymentOrder(controller);
this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
}
/**
* Returns the time in the past before which jobs are at this moment considered unresponsive
*/
public Instant jobTimeoutLimit() {
return clock.instant().minus(jobTimeout);
}
public DeploymentQueue deploymentQueue() {
return deploymentQueue;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*
* @param report information about the job that just completed
*/
public void triggerFromCompletion(JobReport report) {
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(report.projectId());
if (report.jobType() == JobType.component && report.success()) {
if ( ! acceptNewApplicationVersionNow(application))
application = application.withOutstandingChange(Change.of(applicationVersion));
else
application = application.withChange(application.change().with(applicationVersion));
}
applications().store(application);
});
}
/**
* Find jobs that can and should run but are currently not.
*/
public void triggerReadyJobs() {
ApplicationList applications = ApplicationList.from(applications().asList());
applications = applications.notPullRequest()
.withProjectId()
.deploying();
for (Application application : applications.asList())
applications().lockIfPresent(application.id(), this::triggerReadyJobs);
}
/**
* Trigger a job for an application, if allowed
*
* @param triggering the triggering to execute, i.e., application, job type and reason
* @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
*/
public LockedApplication trigger(Triggering triggering, LockedApplication application) {
if ( ! application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), application.change())) {
log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType,
application, triggering.reason));
return application;
}
log.info(triggering.toString());
deploymentQueue.addJob(application.id(), triggering.jobType, triggering.retry);
return application.withJobTriggering(triggering.jobType,
clock.instant(),
application.deployVersionFor(triggering.jobType, controller),
application.deployApplicationVersionFor(triggering.jobType, controller, false)
.orElse(ApplicationVersion.unknown),
triggering.reason);
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already have an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && !application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/**
* Cancels any ongoing upgrade of the given application
*
* @param applicationId the application to trigger
*/
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
if ( ! applications().require(applicationId).change().isPresent())
deploymentQueue.removeJobs(application.id());
});
}
/**
* Finds the next step to trigger for the given application, if any, and triggers it
*/
private Optional<Instant> completedAt(Application application, JobType jobType) {
return jobType.isProduction()
? changeCompletedAt(application, jobType)
: application.deploymentJobs().successAt(application.change(), jobType);
}
private boolean allowedToTriggerNow(Triggering triggering, Application application) {
if (application.deploymentJobs().isRunning(triggering.jobType, jobTimeoutLimit()))
return false;
if ( ! triggering.jobType.isProduction())
return true;
if ( ! triggering.concurrentlyWith.containsAll(JobList.from(application)
.production()
.running(jobTimeoutLimit())
.mapToList(JobStatus::type)))
return false;
if (application.change().blockedBy(application.deploymentSpec(), clock.instant()))
return false;
return true;
}
private ApplicationController applications() {
return controller.applications();
}
/** Returns the instant when the given application's current change was completed for the given job. */
private Optional<Instant> changeCompletedAt(Application application, JobType job) {
if ( ! job.isProduction())
throw new IllegalArgumentException(job + " is not a production job!");
Deployment deployment = application.deployments().get(job.zone(controller.system()).get());
if (deployment == null)
return Optional.empty();
int applicationComparison = application.change().application()
.map(version -> version.compareTo(deployment.applicationVersion()))
.orElse(0);
int platformComparison = application.change().platform()
.map(version -> version.compareTo(deployment.version()))
.orElse(0);
if (applicationComparison == -1 || platformComparison == -1)
return Optional.of(deployment.at());
return applicationComparison == 0 && platformComparison == 0 ? Optional.of(deployment.at()) : Optional.empty();
}
private boolean acceptNewApplicationVersionNow(LockedApplication application) {
if ( ! application.change().isPresent()) return true;
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
if ( ! application.deploymentSpec().canUpgradeAt(clock.instant())
|| ! application.deploymentSpec().canChangeRevisionAt(clock.instant()))
return true;
return false;
}
public static class Triggering {
private final LockedApplication application;
private final JobType jobType;
private final boolean retry;
private final String reason;
private final Collection<JobType> concurrentlyWith;
public Triggering(LockedApplication application, JobType jobType, String reason, Collection<JobType> concurrentlyWith) {
this.application = application;
this.jobType = jobType;
this.concurrentlyWith = concurrentlyWith;
JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent();
this.reason = retry ? "Retrying on out of capacity" : reason;
}
public Triggering(LockedApplication application, JobType jobType, String reason) {
this(application, jobType, reason, Collections.emptySet());
}
public String toString() {
return String.format("Triggering %s for %s, deploying %s: %s", jobType, application, application.change(), reason);
}
}
} | class DeploymentTrigger {
/**
* The max duration a job may run before we consider it dead/hanging
*/
private final Duration jobTimeout;
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentQueue deploymentQueue;
private final DeploymentOrder order;
public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.deploymentQueue = new DeploymentQueue(controller, curator);
this.order = new DeploymentOrder(controller::system);
this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
}
/**
* Returns the time in the past before which jobs are at this moment considered unresponsive
*/
public Instant jobTimeoutLimit() {
return clock.instant().minus(jobTimeout);
}
public DeploymentQueue deploymentQueue() {
return deploymentQueue;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*
* @param report information about the job that just completed
*/
public void triggerFromCompletion(JobReport report) {
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(report.projectId());
if (report.jobType() == JobType.component && report.success()) {
if ( ! acceptNewApplicationVersionNow(application))
application = application.withOutstandingChange(Change.of(applicationVersion));
else
application = application.withChange(application.change().with(applicationVersion));
}
applications().store(application);
});
}
/**
* Find jobs that can and should run but are currently not.
*/
public void triggerReadyJobs() {
ApplicationList applications = ApplicationList.from(applications().asList());
applications = applications.notPullRequest()
.withProjectId()
.deploying();
for (Application application : applications.asList())
applications().lockIfPresent(application.id(), this::triggerReadyJobs);
}
/**
* Trigger a job for an application, if allowed
*
* @param triggering the triggering to execute, i.e., application, job type and reason
* @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
*/
public LockedApplication trigger(Triggering triggering, LockedApplication application) {
if ( ! application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), application.change())) {
log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType,
application, triggering.reason));
return application;
}
log.info(triggering.toString());
deploymentQueue.addJob(application.id(), triggering.jobType, triggering.retry);
return application.withJobTriggering(triggering.jobType,
clock.instant(),
application.deployVersionFor(triggering.jobType, controller),
application.deployApplicationVersionFor(triggering.jobType, controller, false)
.orElse(ApplicationVersion.unknown),
triggering.reason);
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already have an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && !application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/**
* Cancels any ongoing upgrade of the given application
*
* @param applicationId the application to trigger
*/
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
if ( ! applications().require(applicationId).change().isPresent())
deploymentQueue.removeJobs(application.id());
});
}
/**
* Finds the next step to trigger for the given application, if any, and triggers it
*/
private Optional<Instant> completedAt(Application application, JobType jobType) {
return jobType.isProduction()
? changeCompletedAt(application, jobType)
: application.deploymentJobs().successAt(application.change(), jobType);
}
private boolean allowedToTriggerNow(Triggering triggering, Application application) {
if (application.deploymentJobs().isRunning(triggering.jobType, jobTimeoutLimit()))
return false;
if ( ! triggering.jobType.isProduction())
return true;
if ( ! triggering.concurrentlyWith.containsAll(JobList.from(application)
.production()
.running(jobTimeoutLimit())
.mapToList(JobStatus::type)))
return false;
if (application.change().blockedBy(application.deploymentSpec(), clock.instant()))
return false;
return true;
}
private ApplicationController applications() {
return controller.applications();
}
/** Returns the instant when the given application's current change was completed for the given job. */
private Optional<Instant> changeCompletedAt(Application application, JobType job) {
if ( ! job.isProduction())
throw new IllegalArgumentException(job + " is not a production job!");
Deployment deployment = application.deployments().get(job.zone(controller.system()).get());
if (deployment == null)
return Optional.empty();
int applicationComparison = application.change().application()
.map(version -> version.compareTo(deployment.applicationVersion()))
.orElse(0);
int platformComparison = application.change().platform()
.map(version -> version.compareTo(deployment.version()))
.orElse(0);
return Optional.of(deployment.at())
.filter(ignored -> applicationComparison == -1 || platformComparison == -1
|| (applicationComparison == 0 && platformComparison == 0));
}
private boolean acceptNewApplicationVersionNow(LockedApplication application) {
if ( ! application.change().isPresent()) return true;
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
if ( ! application.deploymentSpec().canUpgradeAt(clock.instant())
|| ! application.deploymentSpec().canChangeRevisionAt(clock.instant()))
return true;
return false;
}
public static class Triggering {
private final LockedApplication application;
private final JobType jobType;
private final boolean retry;
private final String reason;
private final Collection<JobType> concurrentlyWith;
public Triggering(LockedApplication application, JobType jobType, String reason, Collection<JobType> concurrentlyWith) {
this.application = application;
this.jobType = jobType;
this.concurrentlyWith = concurrentlyWith;
JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent();
this.reason = retry ? "Retrying on out of capacity" : reason;
}
public Triggering(LockedApplication application, JobType jobType, String reason) {
this(application, jobType, reason, Collections.emptySet());
}
public String toString() {
return String.format("Triggering %s for %s, deploying %s: %s", jobType, application, application.change(), reason);
}
}
} |
Sure! | private Optional<Instant> changeCompletedAt(Application application, JobType job) {
if ( ! job.isProduction())
throw new IllegalArgumentException(job + " is not a production job!");
Deployment deployment = application.deployments().get(job.zone(controller.system()).get());
if (deployment == null)
return Optional.empty();
int applicationComparison = application.change().application()
.map(version -> version.compareTo(deployment.applicationVersion()))
.orElse(0);
int platformComparison = application.change().platform()
.map(version -> version.compareTo(deployment.version()))
.orElse(0);
if (applicationComparison == -1 || platformComparison == -1)
return Optional.of(deployment.at());
return applicationComparison == 0 && platformComparison == 0 ? Optional.of(deployment.at()) : Optional.empty();
} | return applicationComparison == 0 && platformComparison == 0 ? Optional.of(deployment.at()) : Optional.empty(); | private Optional<Instant> changeCompletedAt(Application application, JobType job) {
if ( ! job.isProduction())
throw new IllegalArgumentException(job + " is not a production job!");
Deployment deployment = application.deployments().get(job.zone(controller.system()).get());
if (deployment == null)
return Optional.empty();
int applicationComparison = application.change().application()
.map(version -> version.compareTo(deployment.applicationVersion()))
.orElse(0);
int platformComparison = application.change().platform()
.map(version -> version.compareTo(deployment.version()))
.orElse(0);
return Optional.of(deployment.at())
.filter(ignored -> applicationComparison == -1 || platformComparison == -1
|| (applicationComparison == 0 && platformComparison == 0));
} | class DeploymentTrigger {
/**
* The max duration a job may run before we consider it dead/hanging
*/
private final Duration jobTimeout;
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentQueue deploymentQueue;
private final DeploymentOrder order;
public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.deploymentQueue = new DeploymentQueue(controller, curator);
this.order = new DeploymentOrder(controller);
this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
}
/**
* Returns the time in the past before which jobs are at this moment considered unresponsive
*/
public Instant jobTimeoutLimit() {
return clock.instant().minus(jobTimeout);
}
public DeploymentQueue deploymentQueue() {
return deploymentQueue;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*
* @param report information about the job that just completed
*/
public void triggerFromCompletion(JobReport report) {
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(report.projectId());
if (report.jobType() == JobType.component && report.success()) {
if ( ! acceptNewApplicationVersionNow(application))
application = application.withOutstandingChange(Change.of(applicationVersion));
else
application = application.withChange(application.change().with(applicationVersion));
}
applications().store(application);
});
}
/**
* Find jobs that can and should run but are currently not.
*/
public void triggerReadyJobs() {
ApplicationList applications = ApplicationList.from(applications().asList());
applications = applications.notPullRequest()
.withProjectId()
.deploying();
for (Application application : applications.asList())
applications().lockIfPresent(application.id(), this::triggerReadyJobs);
}
/**
* Trigger a job for an application, if allowed
*
* @param triggering the triggering to execute, i.e., application, job type and reason
* @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
*/
public LockedApplication trigger(Triggering triggering, LockedApplication application) {
// Refuse to trigger into an environment the current change is not yet deployable to.
if ( ! application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), application.change())) {
log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType,
application, triggering.reason));
return application;
}
log.info(triggering.toString());
// Enqueue the job, then record the triggering — with the target platform and application versions — on the application.
deploymentQueue.addJob(application.id(), triggering.jobType, triggering.retry);
return application.withJobTriggering(triggering.jobType,
clock.instant(),
application.deployVersionFor(triggering.jobType, controller),
application.deployApplicationVersionFor(triggering.jobType, controller, false)
.orElse(ApplicationVersion.unknown),
triggering.reason);
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already have an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
// Only one change may roll out at a time, unless the current one is failing.
if (application.change().isPresent() && !application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
// Starting an application change clears any parked (outstanding) change.
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/**
* Cancels any ongoing upgrade of the given application
*
* @param applicationId the application for which to cancel the current change
*/
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
// Keep only the application part of the current change when asked to; otherwise clear it entirely.
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
// With no change left, any queued jobs for this application are obsolete.
if ( ! applications().require(applicationId).change().isPresent())
deploymentQueue.removeJobs(application.id());
});
}
/**
* Finds the next step to trigger for the given application, if any, and triggers it
*/
private void triggerReadyJobs(LockedApplication application) {
    List<Triggering> triggerings = new ArrayList<>();
    Change change = application.change();
    List<DeploymentSpec.Step> steps = application.deploymentSpec().steps();
    // An application without an explicit deployment spec implicitly has a single test step.
    if (steps.isEmpty()) steps = Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test));
    // The instant at which the most recently considered step completed, or empty if it has not.
    Optional<Instant> completedAt = Optional.of(clock.instant());
    String reason = "Deploying " + change.toString();
    for (DeploymentSpec.Step step : steps) {
        LockedApplication app = application; // effectively final copy for use in the lambdas below
        // Declared as Set to match the toSet() collectors: job order within a step is irrelevant.
        Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
        Set<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toSet());
        if (remainingJobs.isEmpty()) { // All jobs of this step are complete: find when, and move on.
            if (stepJobs.isEmpty()) { // A step without jobs is a delay step.
                Duration delay = ((DeploymentSpec.Delay) step).duration();
                completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                reason += " after a delay of " + delay;
            }
            else {
                completedAt = stepJobs.stream().map(job -> completedAt(app, job).get()).max(naturalOrder());
                reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
            }
        }
        else if (completedAt.isPresent()) { // Previous step is done: trigger this step's remaining jobs.
            for (JobType job : remainingJobs)
                triggerings.add(new Triggering(app, job, reason, stepJobs));
            completedAt = Optional.empty();
        }
    }
    // If the final step completed, the whole change is done.
    if (completedAt.isPresent())
        application = application.withChange(Change.empty());
    for (Triggering triggering : triggerings)
        if (application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), change) && allowedToTriggerNow(triggering, application))
            application = trigger(triggering, application);
    applications().store(application);
}
/** Returns the instant at which the given job completed the application's current change, or empty if it has not. */
private Optional<Instant> completedAt(Application application, JobType jobType) {
return jobType.isProduction()
? changeCompletedAt(application, jobType)
: application.deploymentJobs().successAt(application.change(), jobType);
}
private boolean allowedToTriggerNow(Triggering triggering, Application application) {
    // Never start a job which is (believed to be) still running.
    if (application.deploymentJobs().isRunning(triggering.jobType, jobTimeoutLimit()))
        return false;
    // Only production jobs are subject to concurrency and block restrictions.
    if ( ! triggering.jobType.isProduction())
        return true;
    // Every production job currently running must be one this triggering allows concurrently with itself.
    List<JobType> runningProduction = JobList.from(application)
                                             .production()
                                             .running(jobTimeoutLimit())
                                             .mapToList(JobStatus::type);
    if ( ! triggering.concurrentlyWith.containsAll(runningProduction))
        return false;
    // Finally, the change must not be blocked by the deployment spec at this time.
    return ! application.change().blockedBy(application.deploymentSpec(), clock.instant());
}
private ApplicationController applications() {
return controller.applications();
}
/** Returns whether a newly built application version should become the current change now, rather than be stored as outstanding. */
private boolean acceptNewApplicationVersionNow(LockedApplication application) {
    Change current = application.change();
    // Accept when nothing is rolling out, when the current change already carries an application
    // version, when the current change is failing, or when the deployment spec currently disallows
    // upgrades or revision changes.
    return ! current.isPresent()
           || current.application().isPresent()
           || application.deploymentJobs().hasFailures()
           || ! application.deploymentSpec().canUpgradeAt(clock.instant())
           || ! application.deploymentSpec().canChangeRevisionAt(clock.instant());
}
/** An immutable description of a single job to trigger for an application, and why. */
public static class Triggering {
private final LockedApplication application;
private final JobType jobType;
// Whether this triggering is a retry after an out-of-capacity failure.
private final boolean retry;
private final String reason;
// Jobs which are allowed to run at the same time as this one.
private final Collection<JobType> concurrentlyWith;
public Triggering(LockedApplication application, JobType jobType, String reason, Collection<JobType> concurrentlyWith) {
this.application = application;
this.jobType = jobType;
this.concurrentlyWith = concurrentlyWith;
JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
// Retry — and say so in the reason — when the last run of this job failed with out of capacity.
this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent();
this.reason = retry ? "Retrying on out of capacity" : reason;
}
/** Creates a triggering which allows no other production jobs to run concurrently with it. */
public Triggering(LockedApplication application, JobType jobType, String reason) {
this(application, jobType, reason, Collections.emptySet());
}
public String toString() {
return String.format("Triggering %s for %s, deploying %s: %s", jobType, application, application.change(), reason);
}
}
} | class DeploymentTrigger {
/**
* The max duration a job may run before we consider it dead/hanging
*/
private final Duration jobTimeout;
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentQueue deploymentQueue;
private final DeploymentOrder order;
public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.deploymentQueue = new DeploymentQueue(controller, curator);
this.order = new DeploymentOrder(controller::system);
this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
}
/**
* Returns the time in the past before which jobs are at this moment considered unresponsive
*/
public Instant jobTimeoutLimit() {
return clock.instant().minus(jobTimeout);
}
public DeploymentQueue deploymentQueue() {
return deploymentQueue;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*
* @param report information about the job that just completed
*/
public void triggerFromCompletion(JobReport report) {
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(report.projectId());
if (report.jobType() == JobType.component && report.success()) {
if ( ! acceptNewApplicationVersionNow(application))
application = application.withOutstandingChange(Change.of(applicationVersion));
else
application = application.withChange(application.change().with(applicationVersion));
}
applications().store(application);
});
}
/**
* Find jobs that can and should run but are currently not.
*/
public void triggerReadyJobs() {
ApplicationList applications = ApplicationList.from(applications().asList());
applications = applications.notPullRequest()
.withProjectId()
.deploying();
for (Application application : applications.asList())
applications().lockIfPresent(application.id(), this::triggerReadyJobs);
}
/**
* Trigger a job for an application, if allowed
*
* @param triggering the triggering to execute, i.e., application, job type and reason
* @return the application in the triggered state, if actually triggered. This *must* be stored by the caller
*/
public LockedApplication trigger(Triggering triggering, LockedApplication application) {
if ( ! application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), application.change())) {
log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", triggering.jobType,
application, triggering.reason));
return application;
}
log.info(triggering.toString());
deploymentQueue.addJob(application.id(), triggering.jobType, triggering.retry);
return application.withJobTriggering(triggering.jobType,
clock.instant(),
application.deployVersionFor(triggering.jobType, controller),
application.deployApplicationVersionFor(triggering.jobType, controller, false)
.orElse(ApplicationVersion.unknown),
triggering.reason);
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already have an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && !application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/**
* Cancels any ongoing upgrade of the given application
*
* @param applicationId the application for which to cancel the current change
*/
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
if ( ! applications().require(applicationId).change().isPresent())
deploymentQueue.removeJobs(application.id());
});
}
/**
* Finds the next step to trigger for the given application, if any, and triggers it
*/
private void triggerReadyJobs(LockedApplication application) {
List<Triggering> triggerings = new ArrayList<>();
Change change = application.change();
List<DeploymentSpec.Step> steps = application.deploymentSpec().steps();
// An application without an explicit deployment spec implicitly has a single test step.
if (steps.isEmpty()) steps = Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test));
// The instant at which the most recently considered step completed, or empty if it has not.
Optional<Instant> completedAt = Optional.of(clock.instant());
String reason = "Deploying " + change.toString();
for (DeploymentSpec.Step step : steps) {
LockedApplication app = application; // effectively final copy for use in the lambdas below
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Set<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(app, job).isPresent()).collect(toSet());
if (remainingJobs.isEmpty()) { // All jobs of this step are complete: find when, and move on.
if (stepJobs.isEmpty()) { // A step without jobs is a delay step.
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = stepJobs.stream().map(job -> completedAt(app, job).get()).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
else if (completedAt.isPresent()) { // Previous step is done: trigger this step's remaining jobs.
for (JobType job : remainingJobs)
triggerings.add(new Triggering(app, job, reason, stepJobs));
completedAt = Optional.empty();
}
}
// If the final step completed, the whole change is done.
if (completedAt.isPresent())
application = application.withChange(Change.empty());
for (Triggering triggering : triggerings)
if (application.deploymentJobs().isDeployableTo(triggering.jobType.environment(), change) && allowedToTriggerNow(triggering, application))
application = trigger(triggering, application);
applications().store(application);
}
private Optional<Instant> completedAt(Application application, JobType jobType) {
return jobType.isProduction()
? changeCompletedAt(application, jobType)
: application.deploymentJobs().successAt(application.change(), jobType);
}
private boolean allowedToTriggerNow(Triggering triggering, Application application) {
if (application.deploymentJobs().isRunning(triggering.jobType, jobTimeoutLimit()))
return false;
if ( ! triggering.jobType.isProduction())
return true;
if ( ! triggering.concurrentlyWith.containsAll(JobList.from(application)
.production()
.running(jobTimeoutLimit())
.mapToList(JobStatus::type)))
return false;
if (application.change().blockedBy(application.deploymentSpec(), clock.instant()))
return false;
return true;
}
private ApplicationController applications() {
return controller.applications();
}
/** Returns whether a newly built application version should become the current change now, rather than be stored as outstanding. */
private boolean acceptNewApplicationVersionNow(LockedApplication application) {
if ( ! application.change().isPresent()) return true;
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
if ( ! application.deploymentSpec().canUpgradeAt(clock.instant())
|| ! application.deploymentSpec().canChangeRevisionAt(clock.instant()))
return true;
return false;
}
public static class Triggering {
private final LockedApplication application;
private final JobType jobType;
private final boolean retry;
private final String reason;
private final Collection<JobType> concurrentlyWith;
public Triggering(LockedApplication application, JobType jobType, String reason, Collection<JobType> concurrentlyWith) {
this.application = application;
this.jobType = jobType;
this.concurrentlyWith = concurrentlyWith;
JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent();
this.reason = retry ? "Retrying on out of capacity" : reason;
}
public Triggering(LockedApplication application, JobType jobType, String reason) {
this(application, jobType, reason, Collections.emptySet());
}
public String toString() {
return String.format("Triggering %s for %s, deploying %s: %s", jobType, application, application.change(), reason);
}
}
} |
Are all non-user tenants athenz tenants now? | private void confirmApplicationOwnerships() {
ApplicationList.from(controller().applications().asList())
.notPullRequest()
.hasProductionDeployment()
.asList()
.forEach(application -> {
try {
Tenant tenant = ownerOf(application.id());
Optional<IssueId> ourIssueId = application.ownershipIssueId();
ourIssueId = tenant instanceof AthenzTenant
? ownershipIssues.confirmOwnership(ourIssueId, application.id(), propertyIdFor((AthenzTenant) tenant))
: ownershipIssues.confirmOwnership(ourIssueId, application.id(), userFor(tenant));
ourIssueId.ifPresent(issueId -> store(issueId, application.id()));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Exception caught when attempting to file an issue for " + application.id(), e);
}
});
} | : ownershipIssues.confirmOwnership(ourIssueId, application.id(), userFor(tenant)); | private void confirmApplicationOwnerships() {
ApplicationList.from(controller().applications().asList())
.notPullRequest()
.hasProductionDeployment()
.asList()
.forEach(application -> {
try {
Tenant tenant = ownerOf(application.id());
Optional<IssueId> ourIssueId = application.ownershipIssueId();
ourIssueId = tenant instanceof AthenzTenant
? ownershipIssues.confirmOwnership(ourIssueId, application.id(), propertyIdFor((AthenzTenant) tenant))
: ownershipIssues.confirmOwnership(ourIssueId, application.id(), userFor(tenant));
ourIssueId.ifPresent(issueId -> store(issueId, application.id()));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Exception caught when attempting to file an issue for " + application.id(), e);
}
});
} | class ApplicationOwnershipConfirmer extends Maintainer {
private final OwnershipIssues ownershipIssues;
/** Creates the confirmer, running at the given interval under the given job control. */
public ApplicationOwnershipConfirmer(Controller controller, Duration interval, JobControl jobControl, OwnershipIssues ownershipIssues) {
super(controller, interval, jobControl);
this.ownershipIssues = ownershipIssues;
}
@Override
protected void maintain() {
// First file/refresh ownership issues, then follow up on those already filed.
confirmApplicationOwnerships();
ensureConfirmationResponses();
}
/* confirmApplicationOwnerships: files an ownership issue with the owners of all applications we know about. */
/** Escalate ownership issues which have not been closed before a defined amount of time has passed. */
private void ensureConfirmationResponses() {
for (Application application : controller().applications().asList())
application.ownershipIssueId().ifPresent(issueId -> {
try {
// Only Athenz tenants may have a property id; empty for other tenants.
Optional<PropertyId> propertyId = Optional.of(application.id())
.map(this::ownerOf)
.filter(t -> t instanceof AthenzTenant)
.map(AthenzTenant.class::cast)
.flatMap(AthenzTenant::propertyId);
ownershipIssues.ensureResponse(issueId, propertyId);
}
catch (RuntimeException e) {
// Keep going: a failure for one application should not block follow-up for the rest.
log.log(Level.WARNING, "Exception caught when attempting to escalate issue with id " + issueId, e);
}
});
}
/** Returns the tenant which owns the given application, or throws if no such tenant exists. */
private Tenant ownerOf(ApplicationId applicationId) {
return controller().tenants().tenant(applicationId.tenant())
.orElseThrow(() -> new IllegalStateException("No tenant found for application " + applicationId));
}
/** Returns the user owning the given (user) tenant, derived from the tenant name. */
protected User userFor(Tenant tenant) {
// NOTE(review): replaceFirst interprets Tenant.userPrefix as a regex and matches anywhere in the
// name, not only at the start — confirm the prefix is regex-safe and always leading.
return User.from(tenant.name().value().replaceFirst(Tenant.userPrefix, ""));
}
/** Returns the property id of the given Athenz tenant, or throws if none is listed. */
protected PropertyId propertyIdFor(AthenzTenant tenant) {
return tenant.propertyId()
.orElseThrow(() -> new NoSuchElementException("No PropertyId is listed for non-user tenant " +
tenant));
}
/** Stores the given issue id on the given application, if the application still exists. */
protected void store(IssueId issueId, ApplicationId applicationId) {
controller().applications().lockIfPresent(applicationId, application ->
controller().applications().store(application.withOwnershipIssueId(issueId)));
}
} | class ApplicationOwnershipConfirmer extends Maintainer {
private final OwnershipIssues ownershipIssues;
public ApplicationOwnershipConfirmer(Controller controller, Duration interval, JobControl jobControl, OwnershipIssues ownershipIssues) {
super(controller, interval, jobControl);
this.ownershipIssues = ownershipIssues;
}
@Override
protected void maintain() {
confirmApplicationOwnerships();
ensureConfirmationResponses();
}
/** File an ownership issue with the owners of all applications we know about. */
/** Escalate ownership issues which have not been closed before a defined amount of time has passed. */
private void ensureConfirmationResponses() {
for (Application application : controller().applications().asList())
application.ownershipIssueId().ifPresent(issueId -> {
try {
Optional<PropertyId> propertyId = Optional.of(application.id())
.map(this::ownerOf)
.filter(t -> t instanceof AthenzTenant)
.map(AthenzTenant.class::cast)
.flatMap(AthenzTenant::propertyId);
ownershipIssues.ensureResponse(issueId, propertyId);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Exception caught when attempting to escalate issue with id " + issueId, e);
}
});
}
private Tenant ownerOf(ApplicationId applicationId) {
return controller().tenants().tenant(applicationId.tenant())
.orElseThrow(() -> new IllegalStateException("No tenant found for application " + applicationId));
}
protected User userFor(Tenant tenant) {
return User.from(tenant.name().value().replaceFirst(Tenant.userPrefix, ""));
}
protected PropertyId propertyIdFor(AthenzTenant tenant) {
return tenant.propertyId()
.orElseThrow(() -> new NoSuchElementException("No PropertyId is listed for non-user tenant " +
tenant));
}
protected void store(IssueId issueId, ApplicationId applicationId) {
controller().applications().lockIfPresent(applicationId, application ->
controller().applications().store(application.withOwnershipIssueId(issueId)));
}
} |
Not a big deal, though, since exceptions are caught per application. | private void confirmApplicationOwnerships() {
ApplicationList.from(controller().applications().asList())
.notPullRequest()
.hasProductionDeployment()
.asList()
.forEach(application -> {
try {
Tenant tenant = ownerOf(application.id());
Optional<IssueId> ourIssueId = application.ownershipIssueId();
ourIssueId = tenant instanceof AthenzTenant
? ownershipIssues.confirmOwnership(ourIssueId, application.id(), propertyIdFor((AthenzTenant) tenant))
: ownershipIssues.confirmOwnership(ourIssueId, application.id(), userFor(tenant));
ourIssueId.ifPresent(issueId -> store(issueId, application.id()));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Exception caught when attempting to file an issue for " + application.id(), e);
}
});
} | : ownershipIssues.confirmOwnership(ourIssueId, application.id(), userFor(tenant)); | private void confirmApplicationOwnerships() {
ApplicationList.from(controller().applications().asList())
.notPullRequest()
.hasProductionDeployment()
.asList()
.forEach(application -> {
try {
Tenant tenant = ownerOf(application.id());
Optional<IssueId> ourIssueId = application.ownershipIssueId();
ourIssueId = tenant instanceof AthenzTenant
? ownershipIssues.confirmOwnership(ourIssueId, application.id(), propertyIdFor((AthenzTenant) tenant))
: ownershipIssues.confirmOwnership(ourIssueId, application.id(), userFor(tenant));
ourIssueId.ifPresent(issueId -> store(issueId, application.id()));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Exception caught when attempting to file an issue for " + application.id(), e);
}
});
} | class ApplicationOwnershipConfirmer extends Maintainer {
private final OwnershipIssues ownershipIssues;
public ApplicationOwnershipConfirmer(Controller controller, Duration interval, JobControl jobControl, OwnershipIssues ownershipIssues) {
super(controller, interval, jobControl);
this.ownershipIssues = ownershipIssues;
}
@Override
protected void maintain() {
confirmApplicationOwnerships();
ensureConfirmationResponses();
}
/** File an ownership issue with the owners of all applications we know about. */
/** Escalate ownership issues which have not been closed before a defined amount of time has passed. */
private void ensureConfirmationResponses() {
for (Application application : controller().applications().asList())
application.ownershipIssueId().ifPresent(issueId -> {
try {
Optional<PropertyId> propertyId = Optional.of(application.id())
.map(this::ownerOf)
.filter(t -> t instanceof AthenzTenant)
.map(AthenzTenant.class::cast)
.flatMap(AthenzTenant::propertyId);
ownershipIssues.ensureResponse(issueId, propertyId);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Exception caught when attempting to escalate issue with id " + issueId, e);
}
});
}
private Tenant ownerOf(ApplicationId applicationId) {
return controller().tenants().tenant(applicationId.tenant())
.orElseThrow(() -> new IllegalStateException("No tenant found for application " + applicationId));
}
protected User userFor(Tenant tenant) {
return User.from(tenant.name().value().replaceFirst(Tenant.userPrefix, ""));
}
protected PropertyId propertyIdFor(AthenzTenant tenant) {
return tenant.propertyId()
.orElseThrow(() -> new NoSuchElementException("No PropertyId is listed for non-user tenant " +
tenant));
}
protected void store(IssueId issueId, ApplicationId applicationId) {
controller().applications().lockIfPresent(applicationId, application ->
controller().applications().store(application.withOwnershipIssueId(issueId)));
}
} | class ApplicationOwnershipConfirmer extends Maintainer {
private final OwnershipIssues ownershipIssues;
public ApplicationOwnershipConfirmer(Controller controller, Duration interval, JobControl jobControl, OwnershipIssues ownershipIssues) {
super(controller, interval, jobControl);
this.ownershipIssues = ownershipIssues;
}
@Override
protected void maintain() {
confirmApplicationOwnerships();
ensureConfirmationResponses();
}
/** File an ownership issue with the owners of all applications we know about. */
/** Escalate ownership issues which have not been closed before a defined amount of time has passed. */
private void ensureConfirmationResponses() {
for (Application application : controller().applications().asList())
application.ownershipIssueId().ifPresent(issueId -> {
try {
Optional<PropertyId> propertyId = Optional.of(application.id())
.map(this::ownerOf)
.filter(t -> t instanceof AthenzTenant)
.map(AthenzTenant.class::cast)
.flatMap(AthenzTenant::propertyId);
ownershipIssues.ensureResponse(issueId, propertyId);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Exception caught when attempting to escalate issue with id " + issueId, e);
}
});
}
private Tenant ownerOf(ApplicationId applicationId) {
return controller().tenants().tenant(applicationId.tenant())
.orElseThrow(() -> new IllegalStateException("No tenant found for application " + applicationId));
}
protected User userFor(Tenant tenant) {
return User.from(tenant.name().value().replaceFirst(Tenant.userPrefix, ""));
}
protected PropertyId propertyIdFor(AthenzTenant tenant) {
return tenant.propertyId()
.orElseThrow(() -> new NoSuchElementException("No PropertyId is listed for non-user tenant " +
tenant));
}
protected void store(IssueId issueId, ApplicationId applicationId) {
controller().applications().lockIfPresent(applicationId, application ->
controller().applications().store(application.withOwnershipIssueId(issueId)));
}
} |
Test name is `handleSingleNonSearchPersistentVds` (emphasis "Vds"); should we remove it entirely or just rename it? | public void handleSingleNonSearchPersistentVds() throws Exception {
ContentCluster a = createContent(
"<content version =\"1.0\" id=\"a\">"+
" <redundancy>3</redundancy>"+
" <documents>" +
" <document type=\"music\" mode=\"store-only\"/>" +
" </documents>" +
" <group>"+
" <node hostalias=\"mockhost\" distribution-key=\"0\"/>"+
" </group>"+
"</content>");
ContentSearchCluster s = a.getSearch();
assertFalse(s.hasIndexedCluster());
assertTrue(s.getClusters().isEmpty());
assertTrue(a.getPersistence() instanceof ProtonEngine.Factory);
assertEquals(1, a.getStorageNodes().getChildren().size());
} | assertTrue(a.getPersistence() instanceof ProtonEngine.Factory); | public void handleSingleNonSearchPersistentVds() throws Exception {
ContentCluster a = createContent(
"<content version =\"1.0\" id=\"a\">"+
" <redundancy>3</redundancy>"+
" <documents>" +
" <document type=\"music\" mode=\"store-only\"/>" +
" </documents>" +
" <group>"+
" <node hostalias=\"mockhost\" distribution-key=\"0\"/>"+
" </group>"+
"</content>");
ContentSearchCluster s = a.getSearch();
assertFalse(s.hasIndexedCluster());
assertTrue(s.getClusters().isEmpty());
assertTrue(a.getPersistence() instanceof ProtonEngine.Factory);
assertEquals(1, a.getStorageNodes().getChildren().size());
} | class ContentBuilderTest extends DomBuilderTest {
/**
 * Builds a VespaModel from the given content cluster xml (wrapped with a minimal admin setup and
 * the 'music' search definition) and returns its single content cluster, or null if there is none.
 */
private ContentCluster createContent(String xml) throws Exception {
String combined = "" +
"<services>"+
" <admin version='2.0'>" +
" <adminserver hostalias='mockhost'/>" +
" </admin>" +
xml +
"</services>";
VespaModel m = new VespaModelCreatorWithMockPkg(new MockApplicationPackage.Builder()
.withHosts(getHosts())
.withServices(combined)
.withSearchDefinition(MockApplicationPackage.MUSIC_SEARCHDEFINITION)
.build())
.create();
return m.getContentClusters().isEmpty()
? null
: m.getContentClusters().values().iterator().next();
}
/**
 * Like createContent, but the application package includes both the 'music' and 'book'
 * search definitions. Returns the single content cluster, or null if there is none.
 */
private ContentCluster createContentWithBooksToo(String xml) throws Exception {
String combined = "" +
"<services>"+
" <admin version='2.0'>" +
" <adminserver hostalias='mockhost'/>" +
" </admin>" +
xml +
"</services>";
VespaModel m = new VespaModelCreatorWithMockPkg(new MockApplicationPackage.Builder()
.withHosts(getHosts())
.withServices(combined)
.withSearchDefinitions(Arrays.asList(MockApplicationPackage.MUSIC_SEARCHDEFINITION,
MockApplicationPackage.BOOK_SEARCHDEFINITION))
.build())
.create();
return m.getContentClusters().isEmpty()
? null
: m.getContentClusters().values().iterator().next();
}
/** Returns a hosts xml declaring three hosts, aliased mockhost, mockhost2 and mockhost3. */
private String getHosts() {
return "<?xml version='1.0' encoding='utf-8' ?>" +
"<hosts>" +
" <host name='node0'>" +
" <alias>mockhost</alias>" +
" </host>" +
" <host name='node1'>" +
" <alias>mockhost2</alias>" +
" </host>" +
" <host name='node2'>" +
" <alias>mockhost3</alias>" +
" </host>" +
"</hosts>";
}
/** Returns a full services xml with the given content group and no per-document config overrides. */
private String getServices(String groupXml) {
return getConfigOverrideServices(groupXml, "");
}
/**
 * Returns a full services xml: an admin cluster, a jdisc search cluster, and a content cluster
 * 'clu' whose group contents and 'music' document config overrides are supplied by the caller.
 */
private String getConfigOverrideServices(String groupXml, String documentOverrides) {
return "" +
"<services>"+
" <admin version='2.0'>" +
" <adminserver hostalias='mockhost'/>" +
" </admin>" +
" <jdisc version='1.0' id='qrc'>" +
" <search/>" +
" <nodes>" +
" <node hostalias='mockhost' />" +
" </nodes>" +
" </jdisc>" +
" <content version='1.0' id='clu'>" +
" <documents>" +
" <document type='music' mode='index'>" +
documentOverrides +
" </document>" +
" </documents>" +
" <redundancy>3</redundancy>"+
" <engine>" +
" <proton>" +
" <query-timeout>7.3</query-timeout>" +
" </proton>" +
" </engine>" +
" <group>"+
groupXml +
" </group>"+
" </content>" +
"</services>";
}
/** Returns a services xml whose content cluster has a single node. */
private String getBasicServices() {
return getServices("<node hostalias='mockhost' distribution-key='0'/>");
}
/**
 * Asserts that the given host runs exactly the given services, by service name.
 * Fails with a message listing the services which were expected but absent, and
 * those which were present but unexpected.
 */
public static void assertServices(HostResource host, String [] services) {
    // Expected services not present on the host.
    StringBuilder missing = new StringBuilder();
    for (String expected : services) {
        if (host.getService(expected) == null) {
            missing.append(expected).append(',');
        }
    }
    // Services present on the host but not expected.
    StringBuilder extra = new StringBuilder();
    for (Service service : host.getServices()) {
        boolean expected = false;
        for (String n : services) {
            if (n.equals(service.getServiceName())) {
                expected = true;
            }
        }
        if ( ! expected) {
            extra.append(service.getServiceName()).append(',');
        }
    }
    // Assert the condition directly rather than assertEquals on two hand-built strings:
    // the original comparison was fragile w.r.t. exact separator whitespace in the expected literal.
    assertTrue("Missing: [" + missing + "] Extra: [" + extra + "]",
               missing.length() == 0 && extra.length() == 0);
    assertEquals(services.length, host.getServices().size());
}
@Test
public void handleSingleNonSearchPersistentDummy() throws Exception {
// A store-only cluster with an explicit dummy engine.
ContentCluster a = createContent(
"<content version =\"1.0\" id=\"a\">"+
" <redundancy>3</redundancy>"+
" <documents>" +
" <document type=\"music\" mode=\"store-only\"/>" +
" </documents>" +
" <engine>"+
" <dummy/>"+
" </engine>"+
" <group>"+
" <node hostalias=\"mockhost\" distribution-key=\"0\"/>"+
" </group>"+
"</content>");
ContentSearchCluster s = a.getSearch();
// store-only documents: no search clusters are set up.
assertFalse(s.hasIndexedCluster());
assertTrue(s.getClusters().isEmpty());
// The dummy engine is chosen as the persistence provider.
assertTrue(a.getPersistence() instanceof com.yahoo.vespa.model.content.engines.DummyPersistence.Factory);
}
// Note: this method previously carried two stacked @Test annotations, which does not compile
// (JUnit's @Test is not a repeatable annotation); exactly one is kept.
@Test
public void handleSingleNonSearchPersistentProton() throws Exception {
    // A store-only cluster with an explicit proton engine.
    ContentCluster a = createContent(
            "<content version =\"1.0\" id=\"a\">"+
            " <redundancy>3</redundancy>"+
            " <documents>" +
            " <document type=\"music\" mode=\"store-only\"/>" +
            " </documents>" +
            " <engine>"+
            " <proton/>"+
            " </engine>"+
            " <group>"+
            " <node hostalias=\"mockhost\" distribution-key=\"0\"/>"+
            " </group>"+
            "</content>");
    ContentSearchCluster s = a.getSearch();
    // store-only documents: no search clusters are set up...
    assertFalse(s.hasIndexedCluster());
    assertTrue(s.getClusters().isEmpty());
    // ...but proton is still the persistence engine, with one storage node.
    assertTrue(a.getPersistence() instanceof ProtonEngine.Factory);
    assertEquals(1, a.getStorageNodes().getChildren().size());
}
@Test
public void handleSingleNonSearchNonPersistentCluster() throws Exception {
// A store-only cluster with no engine element at all.
ContentCluster a = createContent(
"<content version =\"1.0\" id=\"a\">"+
" <redundancy>3</redundancy>"+
" <documents>" +
" <document type=\"music\" mode=\"store-only\"/>" +
" </documents>" +
" <group>"+
" <node hostalias=\"mockhost\" distribution-key=\"0\"/>"+
" </group>"+
"</content>");
ContentSearchCluster s = a.getSearch();
// store-only documents: no search clusters are set up.
assertFalse(s.hasIndexedCluster());
assertTrue(s.getClusters().isEmpty());
assertNull(s.getIndexed());
// A flat (unnamed) root group with the single declared node.
assertNull(a.getRootGroup().getName());
assertNull(a.getRootGroup().getIndex());
assertTrue(a.getRootGroup().getSubgroups().isEmpty());
assertEquals(1, a.getRootGroup().getNodes().size());
assertEquals("node0", a.getRootGroup().getNodes().get(0).getHostName());
// Proton is the default persistence engine when none is declared.
assertTrue(a.getPersistence() instanceof ProtonEngine.Factory);
assertEquals(1, a.getStorageNodes().getChildren().size());
assertEquals("a", a.getConfigId());
}
/**
 * An indexed-mode document type creates one indexed search cluster plus the
 * expected per-service config ids on the single host.
 * Fix: removed the unused local {@code expectedServices} array — it was declared
 * but never passed to {@link #assertServices}; the explicit per-service config-id
 * assertions below are what this test actually checks.
 */
@Test
public void handleIndexedOnlyWithoutPersistence() throws Exception {
    VespaModel m = new VespaModelCreatorWithMockPkg(createAppWithMusic(getHosts(), getBasicServices())).create();
    ContentCluster c = CollectionUtil.first(m.getContentClusters().values());
    ContentSearchCluster s = c.getSearch();
    assertTrue(s.hasIndexedCluster());
    assertEquals(1, s.getClusters().size());
    assertNotNull(s.getIndexed());
    assertEquals("clu", s.getIndexed().getClusterName());
    assertEquals(7.3, s.getIndexed().getQueryTimeout(), 0.0);
    assertTrue(c.getPersistence() instanceof ProtonEngine.Factory);
    assertEquals(1, c.getStorageNodes().getChildren().size());
    assertEquals("clu", c.getConfigId());
    assertEquals("clu/storage/0", c.getRootGroup().getNodes().get(0).getConfigId());
    assertEquals(1, c.getRoot().getHostSystem().getHosts().size());
    HostResource h = c.getRoot().getHostSystem().getHost("mockhost");
    assertEquals("clu/storage/0", h.getService("storagenode").getConfigId());
    assertEquals("clu/search/cluster.clu/0", h.getService("searchnode").getConfigId());
    assertEquals("clu/distributor/0", h.getService("distributor").getConfigId());
    assertEquals("clu/search/cluster.clu/tlds/qrc.0.tld.0", h.getService("topleveldispatch").getConfigId());
    PartitionsConfig partitionsConfig = new PartitionsConfig((PartitionsConfig.Builder)
            m.getConfig(new PartitionsConfig.Builder(), "clu/search/cluster.clu/tlds/qrc.0.tld.0"));
    assertTrue(partitionsConfig.dataset(0).engine(0).name_and_port().startsWith("tcp/node0:191"));
}
/** Partitions config must be resolvable via the top-level dispatcher's config id. */
@Test
public void testConfigIdLookup() throws Exception {
    VespaModel model = new VespaModelCreatorWithMockPkg(createAppWithMusic(getHosts(), getBasicServices())).create();
    PartitionsConfig config = new PartitionsConfig((PartitionsConfig.Builder)
            model.getConfig(new PartitionsConfig.Builder(), "clu/search/cluster.clu/tlds/qrc.0.tld.0"));
    assertTrue(config.dataset(0).engine(0).name_and_port().startsWith("tcp/node0:191"));
}
/** Two nodes declared on the same host alias yield two search nodes in the indexed cluster. */
@Test
public void testMultipleSearchNodesOnSameHost() throws Exception {
    String services = getServices("<node hostalias='mockhost' distribution-key='0'/>" +
            "<node hostalias='mockhost' distribution-key='1'/>");
    VespaModel model = new VespaModelCreatorWithMockPkg(createAppWithMusic(getHosts(), services)).create();
    PartitionsConfig config = new PartitionsConfig((PartitionsConfig.Builder)
            model.getConfig(new PartitionsConfig.Builder(), "clu/search/cluster.clu/tlds/qrc.0.tld.0"));
    assertTrue(config.dataset(0).engine(0).name_and_port().startsWith("tcp/node0:191"));
    IndexedSearchCluster indexed = model.getContentClusters().get("clu").getSearch().getIndexed();
    assertEquals(2, indexed.getSearchNodeCount());
}
/** A streaming-mode document type creates a streaming search cluster routed to the content cluster. */
@Test
public void handleStreamingOnlyWithoutPersistence() throws Exception
{
    final String musicClusterId = "music-cluster-id";

    ContentCluster cluster = createContent(
            "<content version='1.0' id='" + musicClusterId + "'>" +
            "  <redundancy>3</redundancy>"+
            "  <documents>"+
            "       <document type='music' mode='streaming'/>"+
            "  </documents>"+
            "  <group>"+
            "    <node hostalias=\"mockhost\" distribution-key=\"0\"/>"+
            "  </group>"+
            "</content>");

    ContentSearchCluster search = cluster.getSearch();
    assertFalse(search.hasIndexedCluster());
    assertEquals(1, search.getClusters().size());
    assertNull(search.getIndexed());

    // The streaming cluster is named <content-id>.<doc-type> and routes to the content cluster.
    AbstractSearchCluster streaming = search.getClusters().get(musicClusterId + ".music");
    assertEquals(musicClusterId + ".music", streaming.getClusterName());
    assertEquals(musicClusterId, ((StreamingSearchCluster)streaming).getStorageRouteSpec());

    assertTrue(cluster.getPersistence() instanceof ProtonEngine.Factory);
    assertEquals(1, cluster.getStorageNodes().getChildren().size());

    assertEquals(musicClusterId, cluster.getConfigId());
    assertEquals(musicClusterId + "/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());

    assertEquals(1, cluster.getRoot().getHostSystem().getHosts().size());
    HostResource host = cluster.getRoot().getHostSystem().getHost("mockhost");
    String [] expectedServices = {
            "logd", "configproxy",
            "config-sentinel", "configserver", "logserver",
            "slobrok", "container-clustercontroller",
            "storagenode", "distributor","searchnode","transactionlogserver"
    };
    assertServices(host, expectedServices);
    assertEquals(musicClusterId + "/storage/0", host.getService("storagenode").getConfigId());
    /* Not yet
    assertNotNull(host.getService("qrserver"));
    assertNotNull(host.getService("topleveldisptach"));
    assertNotNull(host.getService("docproc"));
    */
}
/** Two streaming document types yield two streaming clusters, both routed to the same content cluster. */
@Test
public void requireThatContentStreamingHandlesMultipleSearchDefinitions() throws Exception
{
    final String musicClusterId = "music-cluster-id";

    ContentCluster cluster = createContentWithBooksToo(
            "<content version='1.0' id='" + musicClusterId + "'>" +
            "  <redundancy>3</redundancy>"+
            "  <documents>"+
            "       <document type='music' mode='streaming'/>"+
            "       <document type='book' mode='streaming'/>"+
            "  </documents>"+
            "  <group>"+
            "    <node hostalias=\"mockhost\" distribution-key=\"0\"/>"+
            "  </group>"+
            "</content>");

    ContentSearchCluster search = cluster.getSearch();
    assertFalse(search.hasIndexedCluster());
    assertEquals(2, search.getClusters().size());
    assertNull(search.getIndexed());
    {
        String id = musicClusterId + ".book";
        AbstractSearchCluster books = search.getClusters().get(id);
        assertEquals(id, books.getClusterName());
        assertEquals(musicClusterId, ((StreamingSearchCluster) books).getStorageRouteSpec());
    }
    {
        String id = musicClusterId + ".music";
        AbstractSearchCluster music = search.getClusters().get(id);
        assertEquals(id, music.getClusterName());
        assertEquals(musicClusterId, ((StreamingSearchCluster) music).getStorageRouteSpec());
    }
    assertTrue(cluster.getPersistence() instanceof ProtonEngine.Factory);
    assertEquals(1, cluster.getStorageNodes().getChildren().size());
    assertEquals(musicClusterId, cluster.getConfigId());
}
/** An indexed document type creates one indexed search cluster named after the content cluster. */
@Test
public void handleIndexedWithoutPersistence() throws Exception
{
    ContentCluster cluster = createContent(
            "<content version =\"1.0\" id=\"b\">" +
            "  <redundancy>3</redundancy>"+
            "    <documents>"+
            "       <document type='music' mode='index'/>"+
            "    </documents>"+
            "    <group>"+
            "      <node hostalias=\"mockhost\" distribution-key=\"0\"/>"+
            "    </group>"+
            "</content>");

    ContentSearchCluster search = cluster.getSearch();
    assertTrue(search.hasIndexedCluster());
    assertEquals(1, search.getClusters().size());
    assertNotNull(search.getIndexed());
    assertEquals("b", search.getIndexed().getClusterName());

    assertTrue(cluster.getPersistence() instanceof ProtonEngine.Factory);
    assertEquals(1, cluster.getStorageNodes().getChildren().size());

    assertEquals("b", cluster.getConfigId());
    assertEquals("b/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
    assertEquals(1, cluster.getRoot().getHostSystem().getHosts().size());
    HostResource host = cluster.getRoot().getHostSystem().getHost("mockhost");
    assertEquals("b/storage/0", host.getService("storagenode").getConfigId());
}
/** A group-level mmap-core-limit propagates to every search node in the group. */
@Test
public void canConfigureMmapNoCoreLimit() throws Exception {
    ContentCluster cluster = createContent(
            "<content version =\"1.0\" id=\"b\">" +
            "  <redundancy>2</redundancy>" +
            "  <documents>" +
            "    <document type='music' mode='index'/>" +
            "  </documents>" +
            "  <group mmap-core-limit=\"200000\">" +
            "     <node hostalias=\"mockhost\" distribution-key=\"0\" />" +
            "     <node hostalias=\"mockhost\" distribution-key=\"1\" />" +
            "  </group>" +
            "</content>");
    ContentSearchCluster search = cluster.getSearch();
    assertTrue(search.hasIndexedCluster());
    assertNotNull(search.getIndexed());
    assertEquals(2, cluster.getStorageNodes().getChildren().size());
    assertTrue(cluster.getRootGroup().getMmapNoCoreLimit().isPresent());
    assertEquals(200000, cluster.getRootGroup().getMmapNoCoreLimit().get().longValue());
    assertThat(search.getSearchNodes().size(), is(2));
    // Both nodes inherit the group setting, including the generated env variable.
    assertEquals(200000, search.getSearchNodes().get(0).getMMapNoCoreLimit());
    assertEquals(200000, search.getSearchNodes().get(1).getMMapNoCoreLimit());
    assertEquals("VESPA_MMAP_NOCORE_LIMIT=200000 ", search.getSearchNodes().get(0).getMMapNoCoreEnvVariable());
    assertEquals("VESPA_MMAP_NOCORE_LIMIT=200000 ", search.getSearchNodes().get(1).getMMapNoCoreEnvVariable());
}
/** A group-level core-on-oom=true propagates to every search node in the group. */
@Test
public void canConfigureCoreOnOOM() throws Exception {
    ContentCluster cluster = createContent(
            "<content version =\"1.0\" id=\"b\">" +
            "  <redundancy>2</redundancy>" +
            "  <documents>" +
            "    <document type='music' mode='index'/>" +
            "  </documents>" +
            "  <group core-on-oom=\"true\">" +
            "     <node hostalias=\"mockhost\" distribution-key=\"0\" />" +
            "     <node hostalias=\"mockhost\" distribution-key=\"1\" />" +
            "  </group>" +
            "</content>");
    ContentSearchCluster search = cluster.getSearch();
    assertTrue(search.hasIndexedCluster());
    assertNotNull(search.getIndexed());
    assertEquals(2, cluster.getStorageNodes().getChildren().size());
    assertTrue(cluster.getRootGroup().getCoreOnOOM().isPresent());
    assertTrue(cluster.getRootGroup().getCoreOnOOM().get());
    assertThat(search.getSearchNodes().size(), is(2));
    assertTrue(search.getSearchNodes().get(0).getCoreOnOOM());
    assertTrue(search.getSearchNodes().get(1).getCoreOnOOM());
    // When cores are wanted on OOM no silencing env variable is emitted.
    assertEquals("", search.getSearchNodes().get(0).getCoreOnOOMEnvVariable());
    assertEquals("", search.getSearchNodes().get(1).getCoreOnOOMEnvVariable());
}
/** Without an explicit setting, core-on-oom defaults to false and OOM cores are silenced. */
@Test
public void defaultCoreOnOOMIsFalse() throws Exception {
    ContentCluster cluster = createContent(
            "<content version =\"1.0\" id=\"b\">" +
            "  <redundancy>2</redundancy>" +
            "  <documents>" +
            "    <document type='music' mode='index'/>" +
            "  </documents>" +
            "  <group>" +
            "     <node hostalias=\"mockhost\" distribution-key=\"0\" />" +
            "     <node hostalias=\"mockhost\" distribution-key=\"1\" />" +
            "  </group>" +
            "</content>");
    ContentSearchCluster search = cluster.getSearch();
    assertTrue(search.hasIndexedCluster());
    assertNotNull(search.getIndexed());
    assertEquals(2, cluster.getStorageNodes().getChildren().size());
    assertFalse(cluster.getRootGroup().getCoreOnOOM().isPresent());
    assertThat(search.getSearchNodes().size(), is(2));
    assertFalse(search.getSearchNodes().get(0).getCoreOnOOM());
    assertFalse(search.getSearchNodes().get(1).getCoreOnOOM());
    assertEquals("VESPA_SILENCE_CORE_ON_OOM=true ", search.getSearchNodes().get(0).getCoreOnOOMEnvVariable());
    assertEquals("VESPA_SILENCE_CORE_ON_OOM=true ", search.getSearchNodes().get(1).getCoreOnOOMEnvVariable());
}
/** A node-level mmap-core-limit applies to that node only; the other keeps the default (-1). */
@Test
public void canConfigureMmapNoCoreLimitPerHost() throws Exception {
    ContentCluster cluster = createContent(
            "<content version =\"1.0\" id=\"b\">" +
            "  <redundancy>2</redundancy>" +
            "  <documents>" +
            "    <document type='music' mode='index'/>" +
            "  </documents>" +
            "  <group>" +
            "     <node hostalias=\"mockhost\" distribution-key=\"0\" mmap-core-limit=\"200000\"/>" +
            "     <node hostalias=\"mockhost\" distribution-key=\"1\" />" +
            "  </group>" +
            "</content>");
    ContentSearchCluster search = cluster.getSearch();
    assertTrue(search.hasIndexedCluster());
    assertNotNull(search.getIndexed());
    assertEquals(2, cluster.getStorageNodes().getChildren().size());
    // The limit was set per node, so the group itself carries no setting.
    assertFalse(cluster.getRootGroup().getMmapNoCoreLimit().isPresent());
    assertThat(search.getSearchNodes().size(), is(2));
    assertEquals(200000, search.getSearchNodes().get(0).getMMapNoCoreLimit());
    assertEquals(-1, search.getSearchNodes().get(1).getMMapNoCoreLimit());
    assertEquals("VESPA_MMAP_NOCORE_LIMIT=200000 ", search.getSearchNodes().get(0).getMMapNoCoreEnvVariable());
    assertEquals("", search.getSearchNodes().get(1).getMMapNoCoreEnvVariable());
}
/** Node-level core-on-oom overrides apply per node; the group carries no setting. */
@Test
public void canConfigureCoreOnOOMPerHost() throws Exception {
    ContentCluster cluster = createContent(
            "<content version =\"1.0\" id=\"b\">" +
            "  <redundancy>2</redundancy>" +
            "  <documents>" +
            "    <document type='music' mode='index'/>" +
            "  </documents>" +
            "  <group>" +
            "     <node hostalias=\"mockhost\" distribution-key=\"0\" core-on-oom=\"true\"/>" +
            "     <node hostalias=\"mockhost\" distribution-key=\"1\" core-on-oom=\"false\"/>" +
            "  </group>" +
            "</content>");
    ContentSearchCluster search = cluster.getSearch();
    assertTrue(search.hasIndexedCluster());
    assertNotNull(search.getIndexed());
    assertEquals(2, cluster.getStorageNodes().getChildren().size());
    assertFalse(cluster.getRootGroup().getCoreOnOOM().isPresent());
    assertThat(search.getSearchNodes().size(), is(2));
    assertTrue(search.getSearchNodes().get(0).getCoreOnOOM());
    assertFalse(search.getSearchNodes().get(1).getCoreOnOOM());
    assertEquals("", search.getSearchNodes().get(0).getCoreOnOOMEnvVariable());
    assertEquals("VESPA_SILENCE_CORE_ON_OOM=true ", search.getSearchNodes().get(1).getCoreOnOOMEnvVariable());
}
/** Group-level vespamalloc attributes propagate to all search nodes and into the env-variable string. */
@Test
public void canConfigureVespaMalloc() throws Exception {
    ContentCluster cluster = createContent(
            "<content version =\"1.0\" id=\"b\">" +
            "  <redundancy>2</redundancy>" +
            "  <documents>" +
            "    <document type='music' mode='index'/>" +
            "  </documents>" +
            "  <group no-vespamalloc=\"proton\" vespamalloc-debug=\"distributord\" vespamalloc-debug-stacktrace=\"all\" vespamalloc=\"storaged\">" +
            "     <node hostalias=\"mockhost\" distribution-key=\"0\"/>" +
            "     <node hostalias=\"mockhost\" distribution-key=\"1\"/>" +
            "     <node hostalias=\"mockhost\" distribution-key=\"2\"/>" +
            "     <node hostalias=\"mockhost\" distribution-key=\"3\"/>" +
            "  </group>" +
            "</content>");
    ContentSearchCluster search = cluster.getSearch();
    assertTrue(search.hasIndexedCluster());
    assertNotNull(search.getIndexed());
    assertEquals(4, cluster.getStorageNodes().getChildren().size());
    assertTrue(cluster.getRootGroup().getNoVespaMalloc().isPresent());
    assertEquals("proton", cluster.getRootGroup().getNoVespaMalloc().get());
    assertTrue(cluster.getRootGroup().getVespaMalloc().isPresent());
    assertEquals("storaged", cluster.getRootGroup().getVespaMalloc().get());
    assertTrue(cluster.getRootGroup().getVespaMallocDebug().isPresent());
    assertEquals("distributord", cluster.getRootGroup().getVespaMallocDebug().get());
    assertTrue(cluster.getRootGroup().getVespaMallocDebugStackTrace().isPresent());
    assertEquals("all", cluster.getRootGroup().getVespaMallocDebugStackTrace().get());
    assertThat(search.getSearchNodes().size(), is(4));
    for (SearchNode node : search.getSearchNodes()) {
        assertEquals("proton", node.getNoVespaMalloc());
        assertEquals("VESPA_USE_NO_VESPAMALLOC=\"proton\" ", node.getNoVespaMallocEnvVariable());
        assertEquals("distributord", node.getVespaMallocDebug());
        assertEquals("VESPA_USE_VESPAMALLOC=\"storaged\" ", node.getVespaMallocEnvVariable());
        assertEquals("all", node.getVespaMallocDebugStackTrace());
        assertEquals("VESPA_USE_VESPAMALLOC_D=\"distributord\" ", node.getVespaMallocDebugEnvVariable());
        assertEquals("storaged", node.getVespaMalloc());
        assertEquals("VESPA_USE_VESPAMALLOC_DST=\"all\" ", node.getVespaMallocDebugStackTraceEnvVariable());
        assertEquals("VESPA_SILENCE_CORE_ON_OOM=true VESPA_USE_NO_VESPAMALLOC=\"proton\" VESPA_USE_VESPAMALLOC=\"storaged\" VESPA_USE_VESPAMALLOC_D=\"distributord\" VESPA_USE_VESPAMALLOC_DST=\"all\" ", node.getEnvVariables());
    }
}
/** Node-level vespamalloc attributes apply only to the node on which they are set. */
@Test
public void canConfigureVespaMallocPerHost() throws Exception {
    ContentCluster cluster = createContent(
            "<content version =\"1.0\" id=\"b\">" +
            "  <redundancy>2</redundancy>" +
            "  <documents>" +
            "    <document type='music' mode='index'/>" +
            "  </documents>" +
            "  <group>" +
            "     <node hostalias=\"mockhost\" distribution-key=\"0\" no-vespamalloc=\"proton\"/>" +
            "     <node hostalias=\"mockhost\" distribution-key=\"1\" vespamalloc-debug=\"distributord\"/>" +
            "     <node hostalias=\"mockhost\" distribution-key=\"2\" vespamalloc-debug-stacktrace=\"all\"/>" +
            "     <node hostalias=\"mockhost\" distribution-key=\"3\" vespamalloc=\"storaged\"/>" +
            "  </group>" +
            "</content>");
    ContentSearchCluster search = cluster.getSearch();
    assertTrue(search.hasIndexedCluster());
    assertNotNull(search.getIndexed());
    assertEquals(4, cluster.getStorageNodes().getChildren().size());
    // All settings were per node, so the group carries none of them.
    assertFalse(cluster.getRootGroup().getNoVespaMalloc().isPresent());
    assertFalse(cluster.getRootGroup().getVespaMalloc().isPresent());
    assertFalse(cluster.getRootGroup().getVespaMallocDebug().isPresent());
    assertFalse(cluster.getRootGroup().getVespaMallocDebugStackTrace().isPresent());
    assertThat(search.getSearchNodes().size(), is(4));
    assertEquals("VESPA_SILENCE_CORE_ON_OOM=true VESPA_USE_NO_VESPAMALLOC=\"proton\" ", search.getSearchNodes().get(0).getEnvVariables());
    assertEquals("VESPA_SILENCE_CORE_ON_OOM=true VESPA_USE_VESPAMALLOC_D=\"distributord\" ", search.getSearchNodes().get(1).getEnvVariables());
    assertEquals("VESPA_SILENCE_CORE_ON_OOM=true VESPA_USE_VESPAMALLOC_DST=\"all\" ", search.getSearchNodes().get(2).getEnvVariables());
    assertEquals("VESPA_SILENCE_CORE_ON_OOM=true VESPA_USE_VESPAMALLOC=\"storaged\" ", search.getSearchNodes().get(3).getEnvVariables());
}
/** Explicit cpu-socket attributes bind both storage and search nodes to the given sockets. */
@Test
public void canConfigureCpuAffinity() throws Exception
{
    ContentCluster cluster = createContent(
            "<content version =\"1.0\" id=\"b\">" +
            "  <redundancy>2</redundancy>"+
            "  <documents>"+
            "    <document type='music' mode='index'/>"+
            "  </documents>"+
            "  <group>"+
            "     <node hostalias=\"mockhost\" distribution-key=\"0\" cpu-socket=\"0\" />"+
            "     <node hostalias=\"mockhost\" distribution-key=\"1\" cpu-socket=\"1\" />"+
            "  </group>"+
            "</content>");
    ContentSearchCluster search = cluster.getSearch();
    assertTrue(search.hasIndexedCluster());
    assertNotNull(search.getIndexed());
    assertEquals(2, cluster.getStorageNodes().getChildren().size());
    assertTrue(cluster.getStorageNodes().getChildren().get("0").getAffinity().isPresent());
    assertThat(cluster.getStorageNodes().getChildren().get("0").getAffinity().get().cpuSocket(), is(0));
    assertTrue(cluster.getStorageNodes().getChildren().get("1").getAffinity().isPresent());
    assertThat(cluster.getStorageNodes().getChildren().get("1").getAffinity().get().cpuSocket(), is(1));
    assertThat(search.getSearchNodes().size(), is(2));
    assertTrue(search.getSearchNodes().get(0).getAffinity().isPresent());
    assertThat(search.getSearchNodes().get(0).getAffinity().get().cpuSocket(), is(0));
    assertTrue(search.getSearchNodes().get(1).getAffinity().isPresent());
    assertThat(search.getSearchNodes().get(1).getAffinity().get().cpuSocket(), is(1));
}
/**
 * With cpu-socket-affinity on the group, search nodes get sockets assigned
 * round-robin per host, while storage nodes are left without affinity.
 */
@Test
public void canConfigureCpuAffinityAutomatically() throws Exception
{
    ContentCluster cluster = createContent(
            "<content version =\"1.0\" id=\"b\">" +
            "  <redundancy>2</redundancy>"+
            "  <documents>"+
            "    <document type='music' mode='index'/>"+
            "  </documents>"+
            "  <group cpu-socket-affinity=\"true\">"+
            "     <node hostalias=\"mockhost\" distribution-key=\"0\" />"+
            "     <node hostalias=\"mockhost\" distribution-key=\"1\" />"+
            "     <node hostalias=\"mockhost\" distribution-key=\"2\" />"+
            "     <node hostalias=\"mockhost2\" distribution-key=\"3\" />"+
            "     <node hostalias=\"mockhost2\" distribution-key=\"4\" />"+
            "     <node hostalias=\"mockhost3\" distribution-key=\"5\" />"+
            "  </group>"+
            "</content>");
    ContentSearchCluster search = cluster.getSearch();
    assertTrue(search.hasIndexedCluster());
    assertNotNull(search.getIndexed());
    assertEquals(6, cluster.getStorageNodes().getChildren().size());
    assertTrue(cluster.getRootGroup().useCpuSocketAffinity());
    assertThat(search.getSearchNodes().size(), is(6));
    assertTrue(search.getSearchNodes().get(0).getAffinity().isPresent());
    assertTrue(search.getSearchNodes().get(1).getAffinity().isPresent());
    assertTrue(search.getSearchNodes().get(2).getAffinity().isPresent());
    assertTrue(search.getSearchNodes().get(3).getAffinity().isPresent());
    assertTrue(search.getSearchNodes().get(4).getAffinity().isPresent());
    assertTrue(search.getSearchNodes().get(5).getAffinity().isPresent());
    // Sockets restart from 0 on each new host: 0,1,2 on mockhost, 0,1 on mockhost2, 0 on mockhost3.
    assertThat(search.getSearchNodes().get(0).getAffinity().get().cpuSocket(), is(0));
    assertThat(search.getSearchNodes().get(1).getAffinity().get().cpuSocket(), is(1));
    assertThat(search.getSearchNodes().get(2).getAffinity().get().cpuSocket(), is(2));
    assertThat(search.getSearchNodes().get(3).getAffinity().get().cpuSocket(), is(0));
    assertThat(search.getSearchNodes().get(4).getAffinity().get().cpuSocket(), is(1));
    assertThat(search.getSearchNodes().get(5).getAffinity().get().cpuSocket(), is(0));
    assertFalse(cluster.getStorageNodes().getChildren().get("0").getAffinity().isPresent());
    assertFalse(cluster.getStorageNodes().getChildren().get("1").getAffinity().isPresent());
    assertFalse(cluster.getStorageNodes().getChildren().get("2").getAffinity().isPresent());
    assertFalse(cluster.getStorageNodes().getChildren().get("3").getAffinity().isPresent());
    assertFalse(cluster.getStorageNodes().getChildren().get("4").getAffinity().isPresent());
    assertFalse(cluster.getStorageNodes().getChildren().get("5").getAffinity().isPresent());
}
/**
 * Regression test for bug 5357273: an indexed document type combined with the
 * &lt;vds/&gt; engine must be rejected at deployment with a helpful message.
 * Fixes: replaced the obscure {@code assertFalse(true)} with {@code fail(...)},
 * which also carries an explanatory message, and removed the
 * {@code e.printStackTrace()} noise (the assertion on the message is the check;
 * note fail() throws AssertionError, which the catch below does not swallow).
 */
@Test
public void requireBug5357273() throws Exception {
    try {
        createContent(
            "  <content version='1.0' id='storage'>\n" +
            "    <redundancy>3</redundancy>\n" +
            "    <documents>"+
            "       <document type='music' mode='index'/>"+
            "    </documents>" +
            "    <group>\n" +
            "      <node hostalias='mockhost' distribution-key='0' />\n" +
            "    </group>\n" +
            "    <engine>\n" +
            "       <vds/>\n" +
            "    </engine>\n" +
            "  </content>\n");
        fail("Expected deployment to fail: indexed search is not supported by the <vds> engine");
    } catch (Exception e) {
        assertEquals("Persistence engine does not allow for indexed search. Please use <proton> as your engine.", e.getMessage());
    }
}
/** Proton tuning elements in services.xml end up as the corresponding proton config values. */
@Test
public void handleProtonTuning() throws Exception {
    ContentCluster cluster = createContent(
            "<content version =\"1.0\" id=\"a\">" +
            "  <redundancy>3</redundancy>" +
            "  <engine>" +
            "    <proton>" +
            "      <tuning>" +
            "        <searchnode>" +
            "          <summary>" +
            "            <store>" +
            "              <cache>" +
            "                <maxsize>8192</maxsize>" +
            "                <compression>" +
            "                  <type>lz4</type>" +
            "                  <level>8</level>" +
            "                </compression>" +
            "              </cache>" +
            "            </store>" +
            "            <io>" +
            "              <read>directio</read>" +
            "            </io>" +
            "          </summary>" +
            "        </searchnode>" +
            "      </tuning>" +
            "    </proton>" +
            "  </engine>" +
            "  <documents>" +
            "    <document type='music' mode='index'/>" +
            "  </documents>" +
            "  <group>" +
            "    <node hostalias=\"mockhost\" distribution-key=\"0\"/>" +
            "  </group>" +
            "</content>"
    );
    assertTrue(cluster.getPersistence() instanceof ProtonEngine.Factory);
    ProtonConfig.Builder builder = new ProtonConfig.Builder();
    cluster.getSearch().getConfig(builder);
    // Serialize the resolved config to text and assert on the rendered values.
    List<String> configLines = ConfigInstance.serialize(new ProtonConfig(builder));
    String configText = StringUtilities.implode(configLines.toArray(new String[configLines.size()]), "\n");
    assertThat(configText, containsString("summary.cache.maxbytes 8192"));
    assertThat(configText, containsString("summary.cache.compression.level 8"));
    assertThat(configText, containsString("summary.cache.compression.type LZ4"));
    assertThat(configText, containsString("summary.read.io DIRECTIO"));
}
/** A user config element under a document type is resolvable on the search definition's config id. */
@Test
public void requireThatUserConfigCanBeSpecifiedForASearchDefinition() throws Exception {
    String services = getConfigOverrideServices(
            "<node hostalias='mockhost' distribution-key='0'/>",
            "  <config name='mynamespace.myconfig'>" +
            "    <myfield>myvalue</myfield>" +
            "  </config>"
    );
    VespaModel model = new VespaModelCreatorWithMockPkg(createAppWithMusic(getHosts(), services)).create();
    String configId = "clu/search/cluster.clu/music";
    GenericConfig.GenericConfigBuilder builder =
            new GenericConfig.GenericConfigBuilder(new ConfigDefinitionKey("myconfig", "mynamespace"), new ConfigPayloadBuilder());
    model.getConfig(builder, configId);
    assertEquals(builder.getPayload().getSlime().get().field("myfield").asString(), "myvalue");
}
/** Each search container node gets exactly one top-level dispatcher for the content cluster. */
@Test
public void requireOneTldPerSearchContainer() throws Exception {
    ContentCluster cluster = createContent(
            "  <content version='1.0' id='storage'>\n" +
            "    <redundancy>1</redundancy>\n" +
            "    <documents>" +
            "       <document type='music' mode='index'/>" +
            "    </documents>" +
            "    <group>\n" +
            "      <node hostalias='mockhost' distribution-key='0' />\n" +
            "    </group>\n" +
            "  </content>\n" +
            "  <jdisc version='1.0' id='qrc'>" +
            "      <search/>" +
            "      <nodes>" +
            "        <node hostalias='mockhost' />" +
            "      </nodes>" +
            "  </jdisc>" +
            "  <jdisc version='1.0' id='qrc2'>" +
            "      <http>" +
            "      <server id ='server1' port='5000' />" +
            "      </http>" +
            "      <search/>" +
            "      <nodes>" +
            "        <node hostalias='mockhost' />" +
            "        <node hostalias='mockhost2' />" +
            "      </nodes>" +
            "  </jdisc>"
    );
    // Three container nodes in total (qrc: 1, qrc2: 2) => three TLDs, placed per container host.
    List<Dispatch> dispatchers = cluster.getSearch().getIndexed().getTLDs();
    assertThat(dispatchers.get(0).getHostname(), is("node0"));
    assertThat(dispatchers.get(1).getHostname(), is("node0"));
    assertThat(dispatchers.get(2).getHostname(), is("node1"));
    assertThat(dispatchers.size(), is(3));
}
/** A user-config list override must not be duplicated when config is cascaded and then user config applied. */
@Test
@Ignore
public void ensureOverrideAppendedOnlyOnce() throws Exception {
    ContentCluster cluster = createContent(
            "<content version='1.0' id='search'>" +
            "  <config name=\"vespa.config.search.core.proton\">" +
            "    <numthreadspersearch>1</numthreadspersearch>" +
            "    <search>" +
            "      <mmap>" +
            "        <options><item>POPULATE</item></options>" +
            "      </mmap>" +
            "    </search>" +
            "  </config>" +
            "  <redundancy>2</redundancy>" +
            "  <documents>" +
            "    <document type='music' mode='index'/>" +
            "  </documents>" +
            "  <group>" +
            "    <node hostalias='mockhost' distribution-key='0'/>" +
            "  </group>" +
            "</content>");
    ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder();
    cluster.getSearch().getIndexed().getSearchNode(0).cascadeConfig(protonBuilder);
    cluster.getSearch().getIndexed().getSearchNode(0).addUserConfig(protonBuilder);
    ProtonConfig config = new ProtonConfig(protonBuilder);
    // POPULATE must appear exactly once even though config was applied in two steps.
    assertThat(config.search().mmap().options().size(), is(1));
    assertThat(config.search().mmap().options(0), is(ProtonConfig.Search.Mmap.Options.POPULATE));
}
/**
 * pruneremoveddocumentsage keeps its default outside hosted Vespa, but is
 * lowered to ~4 days (349260s) when deploying as hosted Vespa.
 */
@Test
public void ensurePruneRemovedDocumentsAgeForHostedVespa() throws Exception {
    {   // Non-hosted: the proton default applies.
        ContentCluster nonHosted = createContent(
                "<content version='1.0' id='search'>" +
                "  <redundancy>1</redundancy>" +
                "  <documents>" +
                "    <document type='music' mode='index'/>" +
                "  </documents>" +
                "  <nodes>" +
                "    <node hostalias='mockhost' distribution-key='0'/>" +
                "  </nodes>" +
                "</content>");
        ProtonConfig nonHostedConfig = getProtonConfig(nonHosted);
        ProtonConfig defaultConfig = new ProtonConfig(new ProtonConfig.Builder());
        assertEquals(defaultConfig.pruneremoveddocumentsage(), nonHostedConfig.pruneremoveddocumentsage(), 0.001);
    }

    {   // Hosted: a lower prune age is forced.
        String hostedXml = "<services>" +
                "<content version='1.0' id='search'>" +
                "  <redundancy>1</redundancy>" +
                "  <documents>" +
                "    <document type='music' mode='index'/>" +
                "  </documents>" +
                "  <nodes count='1'/>" +
                "</content>" +
                "</services>";
        DeployState.Builder deployStateBuilder = new DeployState.Builder().properties(
                new DeployProperties.Builder()
                        .hostedVespa(true)
                        .build());
        VespaModel model = new VespaModelCreatorWithMockPkg(new MockApplicationPackage.Builder()
                .withServices(hostedXml)
                .withSearchDefinition(MockApplicationPackage.MUSIC_SEARCHDEFINITION)
                .build())
                .create(deployStateBuilder);
        ProtonConfig hostedConfig = getProtonConfig(model.getContentClusters().values().iterator().next());
        assertEquals(349260.0, hostedConfig.pruneremoveddocumentsage(), 0.001);
    }
}
/** Resolves the effective proton config (cascaded + user config) of the cluster's first search node. */
private ProtonConfig getProtonConfig(ContentCluster content) {
    ProtonConfig.Builder builder = new ProtonConfig.Builder();
    SearchNode firstNode = content.getSearch().getIndexed().getSearchNode(0);
    firstNode.cascadeConfig(builder);
    firstNode.addUserConfig(builder);
    return new ProtonConfig(builder);
}
/** Builds a mock application package with the given hosts/services and the music search definition. */
ApplicationPackage createAppWithMusic(String hosts, String services) {
    MockApplicationPackage.Builder builder = new MockApplicationPackage.Builder()
            .withHosts(hosts)
            .withServices(services)
            .withSearchDefinition(MockApplicationPackage.MUSIC_SEARCHDEFINITION);
    return builder.build();
}
}

class ContentBuilderTest extends DomBuilderTest {
/**
 * Wraps the given content-cluster XML in a minimal services setup, builds the
 * model, and returns its (single) content cluster, or null if none was created.
 */
private ContentCluster createContent(String xml) throws Exception {
    String servicesXml = "" +
            "<services>"+
            "  <admin version='2.0'>" +
            "    <adminserver hostalias='mockhost'/>" +
            "  </admin>" +
            xml +
            "</services>";

    VespaModel model = new VespaModelCreatorWithMockPkg(new MockApplicationPackage.Builder()
            .withHosts(getHosts())
            .withServices(servicesXml)
            .withSearchDefinition(MockApplicationPackage.MUSIC_SEARCHDEFINITION)
            .build())
            .create();

    if (model.getContentClusters().isEmpty()) return null;
    return model.getContentClusters().values().iterator().next();
}
/**
 * Like {@link #createContent(String)}, but the application also includes the
 * book search definition in addition to music.
 */
private ContentCluster createContentWithBooksToo(String xml) throws Exception {
    String servicesXml = "" +
            "<services>"+
            "  <admin version='2.0'>" +
            "    <adminserver hostalias='mockhost'/>" +
            "  </admin>" +
            xml +
            "</services>";

    VespaModel model = new VespaModelCreatorWithMockPkg(new MockApplicationPackage.Builder()
            .withHosts(getHosts())
            .withServices(servicesXml)
            .withSearchDefinitions(Arrays.asList(MockApplicationPackage.MUSIC_SEARCHDEFINITION,
                                                 MockApplicationPackage.BOOK_SEARCHDEFINITION))
            .build())
            .create();

    if (model.getContentClusters().isEmpty()) return null;
    return model.getContentClusters().values().iterator().next();
}
/** Hosts XML declaring node0..node2 with aliases mockhost, mockhost2 and mockhost3. */
private String getHosts() {
    String hostsXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<hosts>" +
            "  <host name='node0'>" +
            "    <alias>mockhost</alias>" +
            "  </host>" +
            "  <host name='node1'>" +
            "    <alias>mockhost2</alias>" +
            "  </host>" +
            "  <host name='node2'>" +
            "    <alias>mockhost3</alias>" +
            "  </host>" +
            "</hosts>";
    return hostsXml;
}
/** Services XML with the given group content and no per-document config overrides. */
private String getServices(String groupXml) {
    String noDocumentOverrides = "";
    return getConfigOverrideServices(groupXml, noDocumentOverrides);
}
/**
 * Services XML with one search container ('qrc') and one proton content
 * cluster ('clu'), taking the group content and any config overrides to
 * embed under the music document type.
 */
private String getConfigOverrideServices(String groupXml, String documentOverrides) {
    String servicesXml = "" +
            "<services>"+
            "  <admin version='2.0'>" +
            "    <adminserver hostalias='mockhost'/>" +
            "  </admin>" +
            "  <jdisc version='1.0' id='qrc'>" +
            "      <search/>" +
            "      <nodes>" +
            "        <node hostalias='mockhost' />" +
            "      </nodes>" +
            "  </jdisc>" +
            "  <content version='1.0' id='clu'>" +
            "    <documents>" +
            "      <document type='music' mode='index'>" +
            documentOverrides +
            "      </document>" +
            "    </documents>" +
            "    <redundancy>3</redundancy>"+
            "    <engine>" +
            "      <proton>" +
            "        <query-timeout>7.3</query-timeout>" +
            "      </proton>" +
            "    </engine>" +
            "    <group>"+
            groupXml +
            "    </group>"+
            "  </content>" +
            "</services>";
    return servicesXml;
}
/** Services XML for the minimal case: a single content node with distribution key 0. */
private String getBasicServices() {
    return getServices("<node hostalias='mockhost' distribution-key='0'/>");
}
/**
 * Asserts that the given host runs exactly the given set of services.
 * Fails with a message listing any expected service missing from the host
 * and any service on the host that was not expected.
 */
public static void assertServices(HostResource host, String [] services) {
    // Expected services not present on the host.
    StringBuilder missing = new StringBuilder();
    for (String expected : services) {
        if (host.getService(expected) == null) {
            missing.append(expected).append(',');
        }
    }
    // Services on the host that were not expected.
    StringBuilder extra = new StringBuilder();
    for (Service running : host.getServices()) {
        boolean expectedService = false;
        for (String expected : services) {
            if (expected.equals(running.getServiceName())) {
                expectedService = true;
            }
        }
        if ( ! expectedService) {
            extra.append(running.getServiceName()).append(',');
        }
    }
    // Bug fix: the expected value used to be "Missing: Extra: " (single space), which can
    // never equal "Missing: " + missing + " Extra: " + extra — even with both lists empty
    // the actual is "Missing:  Extra: " (two spaces), so the assertion always failed.
    assertEquals("Missing:  Extra: ", "Missing: " + missing + " Extra: " + extra);
    assertEquals(services.length, host.getServices().size());
}
/** A store-only document type on the dummy engine must not create any search cluster. */
@Test
public void handleSingleNonSearchPersistentDummy() throws Exception {
    ContentCluster cluster = createContent(
            "<content version =\"1.0\" id=\"a\">"+
            "   <redundancy>3</redundancy>"+
            "   <documents>" +
            "     <document type=\"music\" mode=\"store-only\"/>" +
            "   </documents>" +
            "   <engine>"+
            "      <dummy/>"+
            "   </engine>"+
            "   <group>"+
            "     <node hostalias=\"mockhost\" distribution-key=\"0\"/>"+
            "   </group>"+
            "</content>");
    ContentSearchCluster search = cluster.getSearch();
    assertFalse(search.hasIndexedCluster());
    assertTrue(search.getClusters().isEmpty());
    assertTrue(cluster.getPersistence() instanceof com.yahoo.vespa.model.content.engines.DummyPersistence.Factory);
}
/**
 * A store-only document type on the proton engine creates storage nodes but no
 * search cluster.
 * Fix: removed a duplicated {@code @Test} annotation — {@code @Test} is not a
 * repeatable annotation, so writing it twice on one method does not compile.
 */
@Test
public void handleSingleNonSearchPersistentProton() throws Exception {
    ContentCluster a = createContent(
            "<content version =\"1.0\" id=\"a\">"+
            "   <redundancy>3</redundancy>"+
            "   <documents>" +
            "     <document type=\"music\" mode=\"store-only\"/>" +
            "   </documents>" +
            "   <engine>"+
            "      <proton/>"+
            "   </engine>"+
            "   <group>"+
            "     <node hostalias=\"mockhost\" distribution-key=\"0\"/>"+
            "   </group>"+
            "</content>");
    ContentSearchCluster s = a.getSearch();
    assertFalse(s.hasIndexedCluster());
    assertTrue(s.getClusters().isEmpty());
    assertTrue(a.getPersistence() instanceof ProtonEngine.Factory);
    assertEquals(1, a.getStorageNodes().getChildren().size());
}
/** Store-only documents with no explicit engine: a flat single-node group, no search. */
@Test
public void handleSingleNonSearchNonPersistentCluster() throws Exception {
    ContentCluster cluster = createContent(
            "<content version =\"1.0\" id=\"a\">"+
            "   <redundancy>3</redundancy>"+
            "   <documents>" +
            "     <document type=\"music\" mode=\"store-only\"/>" +
            "   </documents>" +
            "   <group>"+
            "     <node hostalias=\"mockhost\" distribution-key=\"0\"/>"+
            "   </group>"+
            "</content>");
    ContentSearchCluster search = cluster.getSearch();
    assertFalse(search.hasIndexedCluster());
    assertTrue(search.getClusters().isEmpty());
    assertNull(search.getIndexed());
    // The root group is anonymous and flat, with the single declared node.
    assertNull(cluster.getRootGroup().getName());
    assertNull(cluster.getRootGroup().getIndex());
    assertTrue(cluster.getRootGroup().getSubgroups().isEmpty());
    assertEquals(1, cluster.getRootGroup().getNodes().size());
    assertEquals("node0", cluster.getRootGroup().getNodes().get(0).getHostName());
    assertTrue(cluster.getPersistence() instanceof ProtonEngine.Factory);
    assertEquals(1, cluster.getStorageNodes().getChildren().size());
    assertEquals("a", cluster.getConfigId());
}
@Test
public void handleIndexedOnlyWithoutPersistence() throws Exception {
VespaModel m = new VespaModelCreatorWithMockPkg(createAppWithMusic(getHosts(), getBasicServices())).create();
ContentCluster c = CollectionUtil.first(m.getContentClusters().values());
ContentSearchCluster s = c.getSearch();
assertTrue(s.hasIndexedCluster());
assertEquals(1, s.getClusters().size());
assertNotNull(s.getIndexed());
assertEquals("clu", s.getIndexed().getClusterName());
assertEquals(7.3, s.getIndexed().getQueryTimeout(), 0.0);
assertTrue(c.getPersistence() instanceof ProtonEngine.Factory);
assertEquals(1, c.getStorageNodes().getChildren().size());
assertEquals("clu", c.getConfigId());
assertEquals("clu/storage/0", c.getRootGroup().getNodes().get(0).getConfigId());
assertEquals(1, c.getRoot().getHostSystem().getHosts().size());
HostResource h = c.getRoot().getHostSystem().getHost("mockhost");
String [] expectedServices = {"logd", "configproxy","config-sentinel", "qrserver", "storagenode", "searchnode", "distributor", "topleveldispatch", "transactionlogserver"};
assertEquals("clu/storage/0", h.getService("storagenode").getConfigId());
assertEquals("clu/search/cluster.clu/0", h.getService("searchnode").getConfigId());
assertEquals("clu/distributor/0", h.getService("distributor").getConfigId());
assertEquals("clu/search/cluster.clu/tlds/qrc.0.tld.0", h.getService("topleveldispatch").getConfigId());
PartitionsConfig partitionsConfig = new PartitionsConfig((PartitionsConfig.Builder)
m.getConfig(new PartitionsConfig.Builder(), "clu/search/cluster.clu/tlds/qrc.0.tld.0"));
assertTrue(partitionsConfig.dataset(0).engine(0).name_and_port().startsWith("tcp/node0:191"));
}
@Test
public void testConfigIdLookup() throws Exception {
VespaModel m = new VespaModelCreatorWithMockPkg(createAppWithMusic(getHosts(), getBasicServices())).create();
PartitionsConfig partitionsConfig = new PartitionsConfig((PartitionsConfig.Builder)
m.getConfig(new PartitionsConfig.Builder(), "clu/search/cluster.clu/tlds/qrc.0.tld.0"));
assertTrue(partitionsConfig.dataset(0).engine(0).name_and_port().startsWith("tcp/node0:191"));
}
@Test
public void testMultipleSearchNodesOnSameHost() throws Exception {
String services = getServices("<node hostalias='mockhost' distribution-key='0'/>" +
"<node hostalias='mockhost' distribution-key='1'/>");
VespaModel m = new VespaModelCreatorWithMockPkg(createAppWithMusic(getHosts(), services)).create();
PartitionsConfig partitionsConfig = new PartitionsConfig((PartitionsConfig.Builder)
m.getConfig(new PartitionsConfig.Builder(), "clu/search/cluster.clu/tlds/qrc.0.tld.0"));
assertTrue(partitionsConfig.dataset(0).engine(0).name_and_port().startsWith("tcp/node0:191"));
IndexedSearchCluster sc = m.getContentClusters().get("clu").getSearch().getIndexed();
assertEquals(2, sc.getSearchNodeCount());
}
@Test
public void handleStreamingOnlyWithoutPersistence() throws Exception
{
final String musicClusterId = "music-cluster-id";
ContentCluster cluster = createContent(
"<content version='1.0' id='" + musicClusterId + "'>" +
" <redundancy>3</redundancy>"+
" <documents>"+
" <document type='music' mode='streaming'/>"+
" </documents>"+
" <group>"+
" <node hostalias=\"mockhost\" distribution-key=\"0\"/>"+
" </group>"+
"</content>");
ContentSearchCluster s;
s = cluster.getSearch();
assertFalse(s.hasIndexedCluster());
assertEquals(1, s.getClusters().size());
assertNull(s.getIndexed());
AbstractSearchCluster sc = s.getClusters().get(musicClusterId + ".music");
assertEquals(musicClusterId + ".music", sc.getClusterName());
assertEquals(musicClusterId, ((StreamingSearchCluster)sc).getStorageRouteSpec());
assertTrue(cluster.getPersistence() instanceof ProtonEngine.Factory);
assertEquals(1, cluster.getStorageNodes().getChildren().size());
assertEquals(musicClusterId, cluster.getConfigId());
assertEquals(musicClusterId + "/storage/0", cluster.getRootGroup().getNodes().get(0).getConfigId());
assertEquals(1, cluster.getRoot().getHostSystem().getHosts().size());
HostResource h = cluster.getRoot().getHostSystem().getHost("mockhost");
String [] expectedServices = {
"logd", "configproxy",
"config-sentinel", "configserver", "logserver",
"slobrok", "container-clustercontroller",
"storagenode", "distributor","searchnode","transactionlogserver"
};
assertServices(h, expectedServices);
assertEquals(musicClusterId + "/storage/0", h.getService("storagenode").getConfigId());
/* Not yet
assertNotNull(h.getService("qrserver"));
assertNotNull(h.getService("topleveldisptach"));
assertNotNull(h.getService("docproc"));
*/
}
@Test
public void requireThatContentStreamingHandlesMultipleSearchDefinitions() throws Exception
{
final String musicClusterId = "music-cluster-id";
ContentCluster cluster = createContentWithBooksToo(
"<content version='1.0' id='" + musicClusterId + "'>" +
" <redundancy>3</redundancy>"+
" <documents>"+
" <document type='music' mode='streaming'/>"+
" <document type='book' mode='streaming'/>"+
" </documents>"+
" <group>"+
" <node hostalias=\"mockhost\" distribution-key=\"0\"/>"+
" </group>"+
"</content>");
ContentSearchCluster s;
s = cluster.getSearch();
assertFalse(s.hasIndexedCluster());
assertEquals(2, s.getClusters().size());
assertNull(s.getIndexed());
{
String id = musicClusterId + ".book";
AbstractSearchCluster sc = s.getClusters().get(id);
assertEquals(id, sc.getClusterName());
assertEquals(musicClusterId, ((StreamingSearchCluster) sc).getStorageRouteSpec());
}
{
String id = musicClusterId + ".music";
AbstractSearchCluster sc = s.getClusters().get(id);
assertEquals(id, sc.getClusterName());
assertEquals(musicClusterId, ((StreamingSearchCluster) sc).getStorageRouteSpec());
}
assertTrue(cluster.getPersistence() instanceof ProtonEngine.Factory);
assertEquals(1, cluster.getStorageNodes().getChildren().size());
assertEquals(musicClusterId, cluster.getConfigId());
}
@Test
public void handleIndexedWithoutPersistence() throws Exception
{
ContentCluster b = createContent(
"<content version =\"1.0\" id=\"b\">" +
" <redundancy>3</redundancy>"+
" <documents>"+
" <document type='music' mode='index'/>"+
" </documents>"+
" <group>"+
" <node hostalias=\"mockhost\" distribution-key=\"0\"/>"+
" </group>"+
"</content>");
ContentSearchCluster s;
s = b.getSearch();
assertTrue(s.hasIndexedCluster());
assertEquals(1, s.getClusters().size());
assertNotNull(s.getIndexed());
assertEquals("b", s.getIndexed().getClusterName());
assertTrue(b.getPersistence() instanceof ProtonEngine.Factory);
assertEquals(1, b.getStorageNodes().getChildren().size());
assertEquals("b", b.getConfigId());
assertEquals("b/storage/0", b.getRootGroup().getNodes().get(0).getConfigId());
assertEquals(1, b.getRoot().getHostSystem().getHosts().size());
HostResource h = b.getRoot().getHostSystem().getHost("mockhost");
assertEquals("b/storage/0", h.getService("storagenode").getConfigId());
}
@Test
public void canConfigureMmapNoCoreLimit() throws Exception {
ContentCluster b = createContent(
"<content version =\"1.0\" id=\"b\">" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='music' mode='index'/>" +
" </documents>" +
" <group mmap-core-limit=\"200000\">" +
" <node hostalias=\"mockhost\" distribution-key=\"0\" />" +
" <node hostalias=\"mockhost\" distribution-key=\"1\" />" +
" </group>" +
"</content>");
ContentSearchCluster s;
s = b.getSearch();
assertTrue(s.hasIndexedCluster());
assertNotNull(s.getIndexed());
assertEquals(2, b.getStorageNodes().getChildren().size());
assertTrue(b.getRootGroup().getMmapNoCoreLimit().isPresent());
assertEquals(200000, b.getRootGroup().getMmapNoCoreLimit().get().longValue());
assertThat(s.getSearchNodes().size(), is(2));
assertEquals(200000, s.getSearchNodes().get(0).getMMapNoCoreLimit());
assertEquals(200000, s.getSearchNodes().get(1).getMMapNoCoreLimit());
assertEquals("VESPA_MMAP_NOCORE_LIMIT=200000 ", s.getSearchNodes().get(0).getMMapNoCoreEnvVariable());
assertEquals("VESPA_MMAP_NOCORE_LIMIT=200000 ", s.getSearchNodes().get(1).getMMapNoCoreEnvVariable());
}
@Test
public void canConfigureCoreOnOOM() throws Exception {
ContentCluster b = createContent(
"<content version =\"1.0\" id=\"b\">" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='music' mode='index'/>" +
" </documents>" +
" <group core-on-oom=\"true\">" +
" <node hostalias=\"mockhost\" distribution-key=\"0\" />" +
" <node hostalias=\"mockhost\" distribution-key=\"1\" />" +
" </group>" +
"</content>");
ContentSearchCluster s;
s = b.getSearch();
assertTrue(s.hasIndexedCluster());
assertNotNull(s.getIndexed());
assertEquals(2, b.getStorageNodes().getChildren().size());
assertTrue(b.getRootGroup().getCoreOnOOM().isPresent());
assertTrue(b.getRootGroup().getCoreOnOOM().get());
assertThat(s.getSearchNodes().size(), is(2));
assertTrue(s.getSearchNodes().get(0).getCoreOnOOM());
assertTrue(s.getSearchNodes().get(1).getCoreOnOOM());
assertEquals("", s.getSearchNodes().get(0).getCoreOnOOMEnvVariable());
assertEquals("", s.getSearchNodes().get(1).getCoreOnOOMEnvVariable());
}
@Test
public void defaultCoreOnOOMIsFalse() throws Exception {
ContentCluster b = createContent(
"<content version =\"1.0\" id=\"b\">" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='music' mode='index'/>" +
" </documents>" +
" <group>" +
" <node hostalias=\"mockhost\" distribution-key=\"0\" />" +
" <node hostalias=\"mockhost\" distribution-key=\"1\" />" +
" </group>" +
"</content>");
ContentSearchCluster s = b.getSearch();
assertTrue(s.hasIndexedCluster());
assertNotNull(s.getIndexed());
assertEquals(2, b.getStorageNodes().getChildren().size());
assertFalse(b.getRootGroup().getCoreOnOOM().isPresent());
assertThat(s.getSearchNodes().size(), is(2));
assertFalse(s.getSearchNodes().get(0).getCoreOnOOM());
assertFalse(s.getSearchNodes().get(1).getCoreOnOOM());
assertEquals("VESPA_SILENCE_CORE_ON_OOM=true ", s.getSearchNodes().get(0).getCoreOnOOMEnvVariable());
assertEquals("VESPA_SILENCE_CORE_ON_OOM=true ", s.getSearchNodes().get(1).getCoreOnOOMEnvVariable());
}
@Test
public void canConfigureMmapNoCoreLimitPerHost() throws Exception {
ContentCluster b = createContent(
"<content version =\"1.0\" id=\"b\">" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='music' mode='index'/>" +
" </documents>" +
" <group>" +
" <node hostalias=\"mockhost\" distribution-key=\"0\" mmap-core-limit=\"200000\"/>" +
" <node hostalias=\"mockhost\" distribution-key=\"1\" />" +
" </group>" +
"</content>");
ContentSearchCluster s = b.getSearch();
assertTrue(s.hasIndexedCluster());
assertNotNull(s.getIndexed());
assertEquals(2, b.getStorageNodes().getChildren().size());
assertFalse(b.getRootGroup().getMmapNoCoreLimit().isPresent());
assertThat(s.getSearchNodes().size(), is(2));
assertEquals(200000, s.getSearchNodes().get(0).getMMapNoCoreLimit());
assertEquals(-1, s.getSearchNodes().get(1).getMMapNoCoreLimit());
assertEquals("VESPA_MMAP_NOCORE_LIMIT=200000 ", s.getSearchNodes().get(0).getMMapNoCoreEnvVariable());
assertEquals("", s.getSearchNodes().get(1).getMMapNoCoreEnvVariable());
}
@Test
public void canConfigureCoreOnOOMPerHost() throws Exception {
ContentCluster b = createContent(
"<content version =\"1.0\" id=\"b\">" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='music' mode='index'/>" +
" </documents>" +
" <group>" +
" <node hostalias=\"mockhost\" distribution-key=\"0\" core-on-oom=\"true\"/>" +
" <node hostalias=\"mockhost\" distribution-key=\"1\" core-on-oom=\"false\"/>" +
" </group>" +
"</content>");
ContentSearchCluster s = b.getSearch();
assertTrue(s.hasIndexedCluster());
assertNotNull(s.getIndexed());
assertEquals(2, b.getStorageNodes().getChildren().size());
assertFalse(b.getRootGroup().getCoreOnOOM().isPresent());
assertThat(s.getSearchNodes().size(), is(2));
assertTrue(s.getSearchNodes().get(0).getCoreOnOOM());
assertFalse(s.getSearchNodes().get(1).getCoreOnOOM());
assertEquals("", s.getSearchNodes().get(0).getCoreOnOOMEnvVariable());
assertEquals("VESPA_SILENCE_CORE_ON_OOM=true ", s.getSearchNodes().get(1).getCoreOnOOMEnvVariable());
}
@Test
public void canConfigureVespaMalloc() throws Exception {
ContentCluster b = createContent(
"<content version =\"1.0\" id=\"b\">" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='music' mode='index'/>" +
" </documents>" +
" <group no-vespamalloc=\"proton\" vespamalloc-debug=\"distributord\" vespamalloc-debug-stacktrace=\"all\" vespamalloc=\"storaged\">" +
" <node hostalias=\"mockhost\" distribution-key=\"0\"/>" +
" <node hostalias=\"mockhost\" distribution-key=\"1\"/>" +
" <node hostalias=\"mockhost\" distribution-key=\"2\"/>" +
" <node hostalias=\"mockhost\" distribution-key=\"3\"/>" +
" </group>" +
"</content>");
ContentSearchCluster s = b.getSearch();
assertTrue(s.hasIndexedCluster());
assertNotNull(s.getIndexed());
assertEquals(4, b.getStorageNodes().getChildren().size());
assertTrue(b.getRootGroup().getNoVespaMalloc().isPresent());
assertEquals("proton", b.getRootGroup().getNoVespaMalloc().get());
assertTrue(b.getRootGroup().getVespaMalloc().isPresent());
assertEquals("storaged", b.getRootGroup().getVespaMalloc().get());
assertTrue(b.getRootGroup().getVespaMallocDebug().isPresent());
assertEquals("distributord", b.getRootGroup().getVespaMallocDebug().get());
assertTrue(b.getRootGroup().getVespaMallocDebugStackTrace().isPresent());
assertEquals("all", b.getRootGroup().getVespaMallocDebugStackTrace().get());
assertThat(s.getSearchNodes().size(), is(4));
for (SearchNode n : s.getSearchNodes()) {
assertEquals("proton", n.getNoVespaMalloc());
assertEquals("VESPA_USE_NO_VESPAMALLOC=\"proton\" ", n.getNoVespaMallocEnvVariable());
assertEquals("distributord", n.getVespaMallocDebug());
assertEquals("VESPA_USE_VESPAMALLOC=\"storaged\" ", n.getVespaMallocEnvVariable());
assertEquals("all", n.getVespaMallocDebugStackTrace());
assertEquals("VESPA_USE_VESPAMALLOC_D=\"distributord\" ", n.getVespaMallocDebugEnvVariable());
assertEquals("storaged", n.getVespaMalloc());
assertEquals("VESPA_USE_VESPAMALLOC_DST=\"all\" ", n.getVespaMallocDebugStackTraceEnvVariable());
assertEquals("VESPA_SILENCE_CORE_ON_OOM=true VESPA_USE_NO_VESPAMALLOC=\"proton\" VESPA_USE_VESPAMALLOC=\"storaged\" VESPA_USE_VESPAMALLOC_D=\"distributord\" VESPA_USE_VESPAMALLOC_DST=\"all\" ", n.getEnvVariables());
}
}
@Test
public void canConfigureVespaMallocPerHost() throws Exception {
ContentCluster b = createContent(
"<content version =\"1.0\" id=\"b\">" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='music' mode='index'/>" +
" </documents>" +
" <group>" +
" <node hostalias=\"mockhost\" distribution-key=\"0\" no-vespamalloc=\"proton\"/>" +
" <node hostalias=\"mockhost\" distribution-key=\"1\" vespamalloc-debug=\"distributord\"/>" +
" <node hostalias=\"mockhost\" distribution-key=\"2\" vespamalloc-debug-stacktrace=\"all\"/>" +
" <node hostalias=\"mockhost\" distribution-key=\"3\" vespamalloc=\"storaged\"/>" +
" </group>" +
"</content>");
ContentSearchCluster s = b.getSearch();
assertTrue(s.hasIndexedCluster());
assertNotNull(s.getIndexed());
assertEquals(4, b.getStorageNodes().getChildren().size());
assertFalse(b.getRootGroup().getNoVespaMalloc().isPresent());
assertFalse(b.getRootGroup().getVespaMalloc().isPresent());
assertFalse(b.getRootGroup().getVespaMallocDebug().isPresent());
assertFalse(b.getRootGroup().getVespaMallocDebugStackTrace().isPresent());
assertThat(s.getSearchNodes().size(), is(4));
assertEquals("VESPA_SILENCE_CORE_ON_OOM=true VESPA_USE_NO_VESPAMALLOC=\"proton\" ", s.getSearchNodes().get(0).getEnvVariables());
assertEquals("VESPA_SILENCE_CORE_ON_OOM=true VESPA_USE_VESPAMALLOC_D=\"distributord\" ", s.getSearchNodes().get(1).getEnvVariables());
assertEquals("VESPA_SILENCE_CORE_ON_OOM=true VESPA_USE_VESPAMALLOC_DST=\"all\" ", s.getSearchNodes().get(2).getEnvVariables());
assertEquals("VESPA_SILENCE_CORE_ON_OOM=true VESPA_USE_VESPAMALLOC=\"storaged\" ", s.getSearchNodes().get(3).getEnvVariables());
}
@Test
public void canConfigureCpuAffinity() throws Exception
{
ContentCluster b = createContent(
"<content version =\"1.0\" id=\"b\">" +
" <redundancy>2</redundancy>"+
" <documents>"+
" <document type='music' mode='index'/>"+
" </documents>"+
" <group>"+
" <node hostalias=\"mockhost\" distribution-key=\"0\" cpu-socket=\"0\" />"+
" <node hostalias=\"mockhost\" distribution-key=\"1\" cpu-socket=\"1\" />"+
" </group>"+
"</content>");
ContentSearchCluster s;
s = b.getSearch();
assertTrue(s.hasIndexedCluster());
assertNotNull(s.getIndexed());
assertEquals(2, b.getStorageNodes().getChildren().size());
assertTrue(b.getStorageNodes().getChildren().get("0").getAffinity().isPresent());
assertThat(b.getStorageNodes().getChildren().get("0").getAffinity().get().cpuSocket(), is(0));
assertTrue(b.getStorageNodes().getChildren().get("1").getAffinity().isPresent());
assertThat(b.getStorageNodes().getChildren().get("1").getAffinity().get().cpuSocket(), is(1));
assertThat(s.getSearchNodes().size(), is(2));
assertTrue(s.getSearchNodes().get(0).getAffinity().isPresent());
assertThat(s.getSearchNodes().get(0).getAffinity().get().cpuSocket(), is(0));
assertTrue(s.getSearchNodes().get(1).getAffinity().isPresent());
assertThat(s.getSearchNodes().get(1).getAffinity().get().cpuSocket(), is(1));
}
@Test
public void canConfigureCpuAffinityAutomatically() throws Exception
{
ContentCluster b = createContent(
"<content version =\"1.0\" id=\"b\">" +
" <redundancy>2</redundancy>"+
" <documents>"+
" <document type='music' mode='index'/>"+
" </documents>"+
" <group cpu-socket-affinity=\"true\">"+
" <node hostalias=\"mockhost\" distribution-key=\"0\" />"+
" <node hostalias=\"mockhost\" distribution-key=\"1\" />"+
" <node hostalias=\"mockhost\" distribution-key=\"2\" />"+
" <node hostalias=\"mockhost2\" distribution-key=\"3\" />"+
" <node hostalias=\"mockhost2\" distribution-key=\"4\" />"+
" <node hostalias=\"mockhost3\" distribution-key=\"5\" />"+
" </group>"+
"</content>");
ContentSearchCluster s;
s = b.getSearch();
assertTrue(s.hasIndexedCluster());
assertNotNull(s.getIndexed());
assertEquals(6, b.getStorageNodes().getChildren().size());
assertTrue(b.getRootGroup().useCpuSocketAffinity());
assertThat(s.getSearchNodes().size(), is(6));
assertTrue(s.getSearchNodes().get(0).getAffinity().isPresent());
assertTrue(s.getSearchNodes().get(1).getAffinity().isPresent());
assertTrue(s.getSearchNodes().get(2).getAffinity().isPresent());
assertTrue(s.getSearchNodes().get(3).getAffinity().isPresent());
assertTrue(s.getSearchNodes().get(4).getAffinity().isPresent());
assertTrue(s.getSearchNodes().get(5).getAffinity().isPresent());
assertThat(s.getSearchNodes().get(0).getAffinity().get().cpuSocket(),is (0));
assertThat(s.getSearchNodes().get(1).getAffinity().get().cpuSocket(),is (1));
assertThat(s.getSearchNodes().get(2).getAffinity().get().cpuSocket(),is (2));
assertThat(s.getSearchNodes().get(3).getAffinity().get().cpuSocket(),is (0));
assertThat(s.getSearchNodes().get(4).getAffinity().get().cpuSocket(),is (1));
assertThat(s.getSearchNodes().get(5).getAffinity().get().cpuSocket(),is (0));
assertFalse(b.getStorageNodes().getChildren().get("0").getAffinity().isPresent());
assertFalse(b.getStorageNodes().getChildren().get("1").getAffinity().isPresent());
assertFalse(b.getStorageNodes().getChildren().get("2").getAffinity().isPresent());
assertFalse(b.getStorageNodes().getChildren().get("3").getAffinity().isPresent());
assertFalse(b.getStorageNodes().getChildren().get("4").getAffinity().isPresent());
assertFalse(b.getStorageNodes().getChildren().get("5").getAffinity().isPresent());
}
@Test
public void requireBug5357273() throws Exception {
try {
createContent(
" <content version='1.0' id='storage'>\n" +
" <redundancy>3</redundancy>\n" +
" <documents>"+
" <document type='music' mode='index'/>"+
" </documents>" +
" <group>\n" +
" <node hostalias='mockhost' distribution-key='0' />\n" +
" </group>\n" +
" <engine>\n" +
" <vds/>\n" +
" </engine>\n" +
" </content>\n");
assertFalse(true);
} catch (Exception e) {
e.printStackTrace();
assertEquals("Persistence engine does not allow for indexed search. Please use <proton> as your engine.", e.getMessage());
}
}
@Test
public void handleProtonTuning() throws Exception{
ContentCluster a = createContent(
"<content version =\"1.0\" id=\"a\">" +
" <redundancy>3</redundancy>" +
" <engine>" +
" <proton>" +
" <tuning>" +
" <searchnode>" +
" <summary>" +
" <store>" +
" <cache>" +
" <maxsize>8192</maxsize>" +
" <compression>" +
" <type>lz4</type>" +
" <level>8</level>" +
" </compression>" +
" </cache>" +
" </store>" +
" <io>" +
" <read>directio</read>" +
" </io>" +
" </summary>" +
" </searchnode>" +
" </tuning>" +
" </proton>" +
" </engine>" +
" <documents>" +
" <document type='music' mode='index'/>" +
" </documents>" +
" <group>" +
" <node hostalias=\"mockhost\" distribution-key=\"0\"/>" +
" </group>" +
"</content>"
);
assertTrue(a.getPersistence() instanceof ProtonEngine.Factory);
ProtonConfig.Builder pb = new ProtonConfig.Builder();
a.getSearch().getConfig(pb);
List<String> serialize = ConfigInstance.serialize(new ProtonConfig(pb));
String cfg = StringUtilities.implode(serialize.toArray(new String[serialize.size()]), "\n");
assertThat(cfg, containsString("summary.cache.maxbytes 8192"));
assertThat(cfg, containsString("summary.cache.compression.level 8"));
assertThat(cfg, containsString("summary.cache.compression.type LZ4"));
assertThat(cfg, containsString("summary.read.io DIRECTIO"));
}
@Test
public void requireThatUserConfigCanBeSpecifiedForASearchDefinition() throws Exception {
String services = getConfigOverrideServices(
"<node hostalias='mockhost' distribution-key='0'/>",
" <config name='mynamespace.myconfig'>" +
" <myfield>myvalue</myfield>" +
" </config>"
);
VespaModel m = new VespaModelCreatorWithMockPkg(createAppWithMusic(getHosts(), services)).create();
String configId = "clu/search/cluster.clu/music";
{
GenericConfig.GenericConfigBuilder builder =
new GenericConfig.GenericConfigBuilder(new ConfigDefinitionKey("myconfig", "mynamespace"), new ConfigPayloadBuilder());
m.getConfig(builder, configId);
assertEquals(builder.getPayload().getSlime().get().field("myfield").asString(), "myvalue");
}
}
@Test
public void requireOneTldPerSearchContainer() throws Exception {
ContentCluster content = createContent(
" <content version='1.0' id='storage'>\n" +
" <redundancy>1</redundancy>\n" +
" <documents>" +
" <document type='music' mode='index'/>" +
" </documents>" +
" <group>\n" +
" <node hostalias='mockhost' distribution-key='0' />\n" +
" </group>\n" +
" </content>\n" +
" <jdisc version='1.0' id='qrc'>" +
" <search/>" +
" <nodes>" +
" <node hostalias='mockhost' />" +
" </nodes>" +
" </jdisc>" +
" <jdisc version='1.0' id='qrc2'>" +
" <http>" +
" <server id ='server1' port='5000' />" +
" </http>" +
" <search/>" +
" <nodes>" +
" <node hostalias='mockhost' />" +
" <node hostalias='mockhost2' />" +
" </nodes>" +
" </jdisc>"
);
List<Dispatch> tlds = content.getSearch().getIndexed().getTLDs();
assertThat(tlds.get(0).getHostname(), is("node0"));
assertThat(tlds.get(1).getHostname(), is("node0"));
assertThat(tlds.get(2).getHostname(), is("node1"));
assertThat(tlds.size(), is(3));
}
@Test
@Ignore
public void ensureOverrideAppendedOnlyOnce() throws Exception {
ContentCluster content = createContent(
"<content version='1.0' id='search'>" +
" <config name=\"vespa.config.search.core.proton\">" +
" <numthreadspersearch>1</numthreadspersearch>" +
" <search>" +
" <mmap>" +
" <options><item>POPULATE</item></options>" +
" </mmap>" +
" </search>" +
" </config>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='music' mode='index'/>" +
" </documents>" +
" <group>" +
" <node hostalias='mockhost' distribution-key='0'/>" +
" </group>" +
"</content>");
ProtonConfig.Builder builder = new ProtonConfig.Builder();
content.getSearch().getIndexed().getSearchNode(0).cascadeConfig(builder);
content.getSearch().getIndexed().getSearchNode(0).addUserConfig(builder);
ProtonConfig config = new ProtonConfig(builder);
assertThat(config.search().mmap().options().size(), is(1));
assertThat(config.search().mmap().options(0), is(ProtonConfig.Search.Mmap.Options.POPULATE));
}
@Test
public void ensurePruneRemovedDocumentsAgeForHostedVespa() throws Exception {
{
ContentCluster contentNonHosted = createContent(
"<content version='1.0' id='search'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='music' mode='index'/>" +
" </documents>" +
" <nodes>" +
" <node hostalias='mockhost' distribution-key='0'/>" +
" </nodes>" +
"</content>");
ProtonConfig configNonHosted = getProtonConfig(contentNonHosted);
ProtonConfig defaultConfig = new ProtonConfig(new ProtonConfig.Builder());
assertEquals(defaultConfig.pruneremoveddocumentsage(), configNonHosted.pruneremoveddocumentsage(), 0.001);
}
{
String hostedXml = "<services>" +
"<content version='1.0' id='search'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='music' mode='index'/>" +
" </documents>" +
" <nodes count='1'/>" +
"</content>" +
"</services>";
DeployState.Builder deployStateBuilder = new DeployState.Builder().properties(
new DeployProperties.Builder()
.hostedVespa(true)
.build());
VespaModel model = new VespaModelCreatorWithMockPkg(new MockApplicationPackage.Builder()
.withServices(hostedXml)
.withSearchDefinition(MockApplicationPackage.MUSIC_SEARCHDEFINITION)
.build())
.create(deployStateBuilder);
ProtonConfig config = getProtonConfig(model.getContentClusters().values().iterator().next());
assertEquals(349260.0, config.pruneremoveddocumentsage(), 0.001);
}
}
private ProtonConfig getProtonConfig(ContentCluster content) {
ProtonConfig.Builder configBuilder = new ProtonConfig.Builder();
content.getSearch().getIndexed().getSearchNode(0).cascadeConfig(configBuilder);
content.getSearch().getIndexed().getSearchNode(0).addUserConfig(configBuilder);
return new ProtonConfig(configBuilder);
}
ApplicationPackage createAppWithMusic(String hosts, String services) {
return new MockApplicationPackage.Builder()
.withHosts(hosts)
.withServices(services)
.withSearchDefinition(MockApplicationPackage.MUSIC_SEARCHDEFINITION)
.build();
}
} |
this looks like it will strip all instances of 'X' and 'Y', surely that was not the intention? | public static String stripInvalidCharacters(String string) {
StringBuilder stripped = null;
for (int i = 0; i < string.length(); i++) {
int codePoint = string.codePointAt(i);
if ( ! Text.isTextCharacter(codePoint) || codePoint == 'X' || codePoint == 'Y') {
if (stripped == null)
stripped = new StringBuilder(string.substring(0, i));
stripped.append(' ');
}
else if (stripped != null) {
stripped.appendCodePoint(codePoint);
}
if (Character.isHighSurrogate(string.charAt(i)))
++i;
}
return stripped != null ? stripped.toString() : string;
} | if ( ! Text.isTextCharacter(codePoint) || codePoint == 'X' || codePoint == 'Y') { | public static String stripInvalidCharacters(String string) {
StringBuilder stripped = null;
for (int i = 0; i < string.length(); i++) {
int codePoint = string.codePointAt(i);
if ( ! Text.isTextCharacter(codePoint)) {
if (stripped == null)
stripped = new StringBuilder(string.substring(0, i));
stripped.append(' ');
}
else if (stripped != null) {
stripped.appendCodePoint(codePoint);
}
if (Character.isHighSurrogate(string.charAt(i)))
++i;
}
return stripped != null ? stripped.toString() : string;
} | class Text {
private static final boolean[] allowedAsciiChars = new boolean[0x80];
static {
allowedAsciiChars[0x0] = false;
allowedAsciiChars[0x1] = false;
allowedAsciiChars[0x2] = false;
allowedAsciiChars[0x3] = false;
allowedAsciiChars[0x4] = false;
allowedAsciiChars[0x5] = false;
allowedAsciiChars[0x6] = false;
allowedAsciiChars[0x7] = false;
allowedAsciiChars[0x8] = false;
allowedAsciiChars[0x9] = true;
allowedAsciiChars[0xA] = true;
allowedAsciiChars[0xB] = false;
allowedAsciiChars[0xC] = false;
allowedAsciiChars[0xD] = true;
for (int i = 0xE; i < 0x20; i++) {
allowedAsciiChars[i] = false;
}
for (int i = 0x20; i < 0x7F; i++) {
allowedAsciiChars[i] = true;
}
allowedAsciiChars[0x7F] = true;
}
/** No instantiation */
private Text() {}
/**
* Returns whether the given codepoint is a valid text character, potentially suitable for
* purposes such as indexing and display, see http:
*/
public static boolean isTextCharacter(int codepoint) {
if (codepoint < 0x80) return allowedAsciiChars[codepoint];
if (codepoint < 0xFDD0) return true;
if (codepoint <= 0xFDDF) return false;
if (codepoint < 0x1FFFE) return true;
if (codepoint <= 0x1FFFF) return false;
if (codepoint < 0x2FFFE) return true;
if (codepoint <= 0x2FFFF) return false;
if (codepoint < 0x3FFFE) return true;
if (codepoint <= 0x3FFFF) return false;
if (codepoint < 0x4FFFE) return true;
if (codepoint <= 0x4FFFF) return false;
if (codepoint < 0x5FFFE) return true;
if (codepoint <= 0x5FFFF) return false;
if (codepoint < 0x6FFFE) return true;
if (codepoint <= 0x6FFFF) return false;
if (codepoint < 0x7FFFE) return true;
if (codepoint <= 0x7FFFF) return false;
if (codepoint < 0x8FFFE) return true;
if (codepoint <= 0x8FFFF) return false;
if (codepoint < 0x9FFFE) return true;
if (codepoint <= 0x9FFFF) return false;
if (codepoint < 0xAFFFE) return true;
if (codepoint <= 0xAFFFF) return false;
if (codepoint < 0xBFFFE) return true;
if (codepoint <= 0xBFFFF) return false;
if (codepoint < 0xCFFFE) return true;
if (codepoint <= 0xCFFFF) return false;
if (codepoint < 0xDFFFE) return true;
if (codepoint <= 0xDFFFF) return false;
if (codepoint < 0xEFFFE) return true;
if (codepoint <= 0xEFFFF) return false;
if (codepoint < 0xFFFFE) return true;
if (codepoint <= 0xFFFFF) return false;
if (codepoint < 0x10FFFE) return true;
if (codepoint <= 0x10FFFF) return false;
return true;
}
/**
* Validates that the given string value only contains text characters and
* returns the first illegal code point if one is found.
*/
public static OptionalInt validateTextString(String string) {
for (int i = 0; i < string.length(); i++) {
int codePoint = string.codePointAt(i);
if ( ! Text.isTextCharacter(codePoint))
return OptionalInt.of(codePoint);
if (Character.isHighSurrogate(string.charAt(i)))
++i;
}
return OptionalInt.empty();
}
/**
* Returns a string where any invalid characters in the input string is replaced by spaces
*/
} | class Text {
private static final boolean[] allowedAsciiChars = new boolean[0x80];
static {
allowedAsciiChars[0x0] = false;
allowedAsciiChars[0x1] = false;
allowedAsciiChars[0x2] = false;
allowedAsciiChars[0x3] = false;
allowedAsciiChars[0x4] = false;
allowedAsciiChars[0x5] = false;
allowedAsciiChars[0x6] = false;
allowedAsciiChars[0x7] = false;
allowedAsciiChars[0x8] = false;
allowedAsciiChars[0x9] = true;
allowedAsciiChars[0xA] = true;
allowedAsciiChars[0xB] = false;
allowedAsciiChars[0xC] = false;
allowedAsciiChars[0xD] = true;
for (int i = 0xE; i < 0x20; i++) {
allowedAsciiChars[i] = false;
}
for (int i = 0x20; i < 0x7F; i++) {
allowedAsciiChars[i] = true;
}
allowedAsciiChars[0x7F] = true;
}
/** No instantiation */
private Text() {}
/**
* Returns whether the given codepoint is a valid text character, potentially suitable for
* purposes such as indexing and display, see http:
*/
public static boolean isTextCharacter(int codepoint) {
if (codepoint < 0x80) return allowedAsciiChars[codepoint];
if (codepoint < 0xFDD0) return true;
if (codepoint <= 0xFDDF) return false;
if (codepoint < 0x1FFFE) return true;
if (codepoint <= 0x1FFFF) return false;
if (codepoint < 0x2FFFE) return true;
if (codepoint <= 0x2FFFF) return false;
if (codepoint < 0x3FFFE) return true;
if (codepoint <= 0x3FFFF) return false;
if (codepoint < 0x4FFFE) return true;
if (codepoint <= 0x4FFFF) return false;
if (codepoint < 0x5FFFE) return true;
if (codepoint <= 0x5FFFF) return false;
if (codepoint < 0x6FFFE) return true;
if (codepoint <= 0x6FFFF) return false;
if (codepoint < 0x7FFFE) return true;
if (codepoint <= 0x7FFFF) return false;
if (codepoint < 0x8FFFE) return true;
if (codepoint <= 0x8FFFF) return false;
if (codepoint < 0x9FFFE) return true;
if (codepoint <= 0x9FFFF) return false;
if (codepoint < 0xAFFFE) return true;
if (codepoint <= 0xAFFFF) return false;
if (codepoint < 0xBFFFE) return true;
if (codepoint <= 0xBFFFF) return false;
if (codepoint < 0xCFFFE) return true;
if (codepoint <= 0xCFFFF) return false;
if (codepoint < 0xDFFFE) return true;
if (codepoint <= 0xDFFFF) return false;
if (codepoint < 0xEFFFE) return true;
if (codepoint <= 0xEFFFF) return false;
if (codepoint < 0xFFFFE) return true;
if (codepoint <= 0xFFFFF) return false;
if (codepoint < 0x10FFFE) return true;
if (codepoint <= 0x10FFFF) return false;
return true;
}
/**
 * Validates that the given string contains only valid text characters
 * (see {@link #isTextCharacter(int)}).
 *
 * @return the first illegal code point encountered, or empty if the whole string is valid
 */
public static OptionalInt validateTextString(String string) {
    int index = 0;
    while (index < string.length()) {
        int candidate = string.codePointAt(index);
        if ( ! Text.isTextCharacter(candidate))
            return OptionalInt.of(candidate);
        // A high surrogate at this position means the code point spanned two chars.
        index += Character.isHighSurrogate(string.charAt(index)) ? 2 : 1;
    }
    return OptionalInt.empty();
}
/**
* Returns a string where any invalid characters in the input string is replaced by spaces
*/
} |
Omg, no. Thanks! | public static String stripInvalidCharacters(String string) {
StringBuilder stripped = null;
for (int i = 0; i < string.length(); i++) {
int codePoint = string.codePointAt(i);
if ( ! Text.isTextCharacter(codePoint) || codePoint == 'X' || codePoint == 'Y') {
if (stripped == null)
stripped = new StringBuilder(string.substring(0, i));
stripped.append(' ');
}
else if (stripped != null) {
stripped.appendCodePoint(codePoint);
}
if (Character.isHighSurrogate(string.charAt(i)))
++i;
}
return stripped != null ? stripped.toString() : string;
} | if ( ! Text.isTextCharacter(codePoint) || codePoint == 'X' || codePoint == 'Y') { | public static String stripInvalidCharacters(String string) {
StringBuilder stripped = null; // created lazily: stays null while the input is all-valid
for (int i = 0; i < string.length(); i++) {
int codePoint = string.codePointAt(i);
if ( ! Text.isTextCharacter(codePoint)) {
if (stripped == null)
stripped = new StringBuilder(string.substring(0, i)); // copy the valid prefix seen so far
stripped.append(' '); // replace the invalid code point with a single space
}
else if (stripped != null) {
stripped.appendCodePoint(codePoint);
}
// codePointAt consumed two chars when it read a surrogate pair; skip the low surrogate
if (Character.isHighSurrogate(string.charAt(i)))
++i;
}
// No invalid character seen: return the original instance without copying
return stripped != null ? stripped.toString() : string;
} | class Text {
private static final boolean[] allowedAsciiChars = new boolean[0x80];
static {
allowedAsciiChars[0x0] = false;
allowedAsciiChars[0x1] = false;
allowedAsciiChars[0x2] = false;
allowedAsciiChars[0x3] = false;
allowedAsciiChars[0x4] = false;
allowedAsciiChars[0x5] = false;
allowedAsciiChars[0x6] = false;
allowedAsciiChars[0x7] = false;
allowedAsciiChars[0x8] = false;
allowedAsciiChars[0x9] = true;
allowedAsciiChars[0xA] = true;
allowedAsciiChars[0xB] = false;
allowedAsciiChars[0xC] = false;
allowedAsciiChars[0xD] = true;
for (int i = 0xE; i < 0x20; i++) {
allowedAsciiChars[i] = false;
}
for (int i = 0x20; i < 0x7F; i++) {
allowedAsciiChars[i] = true;
}
allowedAsciiChars[0x7F] = true;
}
/** No instantiation */
private Text() {}
/**
* Returns whether the given codepoint is a valid text character, potentially suitable for
* purposes such as indexing and display, see http:
*/
public static boolean isTextCharacter(int codepoint) {
if (codepoint < 0x80) return allowedAsciiChars[codepoint];
if (codepoint < 0xFDD0) return true;
if (codepoint <= 0xFDDF) return false;
if (codepoint < 0x1FFFE) return true;
if (codepoint <= 0x1FFFF) return false;
if (codepoint < 0x2FFFE) return true;
if (codepoint <= 0x2FFFF) return false;
if (codepoint < 0x3FFFE) return true;
if (codepoint <= 0x3FFFF) return false;
if (codepoint < 0x4FFFE) return true;
if (codepoint <= 0x4FFFF) return false;
if (codepoint < 0x5FFFE) return true;
if (codepoint <= 0x5FFFF) return false;
if (codepoint < 0x6FFFE) return true;
if (codepoint <= 0x6FFFF) return false;
if (codepoint < 0x7FFFE) return true;
if (codepoint <= 0x7FFFF) return false;
if (codepoint < 0x8FFFE) return true;
if (codepoint <= 0x8FFFF) return false;
if (codepoint < 0x9FFFE) return true;
if (codepoint <= 0x9FFFF) return false;
if (codepoint < 0xAFFFE) return true;
if (codepoint <= 0xAFFFF) return false;
if (codepoint < 0xBFFFE) return true;
if (codepoint <= 0xBFFFF) return false;
if (codepoint < 0xCFFFE) return true;
if (codepoint <= 0xCFFFF) return false;
if (codepoint < 0xDFFFE) return true;
if (codepoint <= 0xDFFFF) return false;
if (codepoint < 0xEFFFE) return true;
if (codepoint <= 0xEFFFF) return false;
if (codepoint < 0xFFFFE) return true;
if (codepoint <= 0xFFFFF) return false;
if (codepoint < 0x10FFFE) return true;
if (codepoint <= 0x10FFFF) return false;
return true;
}
/**
* Validates that the given string value only contains text characters and
* returns the first illegal code point if one is found.
*/
public static OptionalInt validateTextString(String string) {
for (int i = 0; i < string.length(); i++) {
int codePoint = string.codePointAt(i);
if ( ! Text.isTextCharacter(codePoint))
return OptionalInt.of(codePoint);
if (Character.isHighSurrogate(string.charAt(i)))
++i;
}
return OptionalInt.empty();
}
/**
* Returns a string where any invalid characters in the input string is replaced by spaces
*/
} | class Text {
private static final boolean[] allowedAsciiChars = new boolean[0x80];
static {
allowedAsciiChars[0x0] = false;
allowedAsciiChars[0x1] = false;
allowedAsciiChars[0x2] = false;
allowedAsciiChars[0x3] = false;
allowedAsciiChars[0x4] = false;
allowedAsciiChars[0x5] = false;
allowedAsciiChars[0x6] = false;
allowedAsciiChars[0x7] = false;
allowedAsciiChars[0x8] = false;
allowedAsciiChars[0x9] = true;
allowedAsciiChars[0xA] = true;
allowedAsciiChars[0xB] = false;
allowedAsciiChars[0xC] = false;
allowedAsciiChars[0xD] = true;
for (int i = 0xE; i < 0x20; i++) {
allowedAsciiChars[i] = false;
}
for (int i = 0x20; i < 0x7F; i++) {
allowedAsciiChars[i] = true;
}
allowedAsciiChars[0x7F] = true;
}
/** No instantiation */
private Text() {}
/**
* Returns whether the given codepoint is a valid text character, potentially suitable for
* purposes such as indexing and display, see http:
*/
public static boolean isTextCharacter(int codepoint) {
if (codepoint < 0x80) return allowedAsciiChars[codepoint];
if (codepoint < 0xFDD0) return true;
if (codepoint <= 0xFDDF) return false;
if (codepoint < 0x1FFFE) return true;
if (codepoint <= 0x1FFFF) return false;
if (codepoint < 0x2FFFE) return true;
if (codepoint <= 0x2FFFF) return false;
if (codepoint < 0x3FFFE) return true;
if (codepoint <= 0x3FFFF) return false;
if (codepoint < 0x4FFFE) return true;
if (codepoint <= 0x4FFFF) return false;
if (codepoint < 0x5FFFE) return true;
if (codepoint <= 0x5FFFF) return false;
if (codepoint < 0x6FFFE) return true;
if (codepoint <= 0x6FFFF) return false;
if (codepoint < 0x7FFFE) return true;
if (codepoint <= 0x7FFFF) return false;
if (codepoint < 0x8FFFE) return true;
if (codepoint <= 0x8FFFF) return false;
if (codepoint < 0x9FFFE) return true;
if (codepoint <= 0x9FFFF) return false;
if (codepoint < 0xAFFFE) return true;
if (codepoint <= 0xAFFFF) return false;
if (codepoint < 0xBFFFE) return true;
if (codepoint <= 0xBFFFF) return false;
if (codepoint < 0xCFFFE) return true;
if (codepoint <= 0xCFFFF) return false;
if (codepoint < 0xDFFFE) return true;
if (codepoint <= 0xDFFFF) return false;
if (codepoint < 0xEFFFE) return true;
if (codepoint <= 0xEFFFF) return false;
if (codepoint < 0xFFFFE) return true;
if (codepoint <= 0xFFFFF) return false;
if (codepoint < 0x10FFFE) return true;
if (codepoint <= 0x10FFFF) return false;
return true;
}
/**
* Validates that the given string value only contains text characters and
* returns the first illegal code point if one is found.
*/
public static OptionalInt validateTextString(String string) {
for (int i = 0; i < string.length(); i++) {
int codePoint = string.codePointAt(i);
if ( ! Text.isTextCharacter(codePoint))
return OptionalInt.of(codePoint);
if (Character.isHighSurrogate(string.charAt(i)))
++i;
}
return OptionalInt.empty();
}
/**
* Returns a string where any invalid characters in the input string is replaced by spaces
*/
} |
This doesn't do anything for `FileOutputStream` and for buffered writers `flush()` should be called by `close()` anyway. | public static void writeToFile(String path, byte[] data) throws IOException {
String tmpPath = path + ".tmp";
try (FileOutputStream stream = new FileOutputStream(tmpPath)) {
stream.write(data);
stream.flush();
}
Files.move(FileSystems.getDefault().getPath(tmpPath), FileSystems.getDefault().getPath(path), ATOMIC_MOVE);
} | stream.flush(); | public static void writeToFile(String path, byte[] data) throws IOException {
String tmpPath = path + ".tmp"; // stage into a sibling temp file first
// try-with-resources closes (and thereby flushes) the stream before the move
try (FileOutputStream stream = new FileOutputStream(tmpPath)) {
stream.write(data);
}
// Atomic rename: readers see either the old file or the complete new one
Files.move(FileSystems.getDefault().getPath(tmpPath), FileSystems.getDefault().getPath(path), ATOMIC_MOVE);
} | class TestFileUtil {
protected static final String DATA_PATH = "./test/crosslanguagefiles";
/**
* Write `data` to `path` using UTF-8 as binary encoding format.
*/
public static void writeToFile(String path, String data) throws IOException {
writeToFile(path, data.getBytes(Charset.forName("UTF-8")));
}
/**
* Returns the path to use for data files.
*
* @param filename The name of the file to include in the path.
* @return The data file path.
*/
public static String getPath(String filename) {
return DATA_PATH + "/" + filename;
}
public static byte[] readFile(String path) throws IOException {
return Files.readAllBytes(Paths.get(path));
}
} | class TestFileUtil {
protected static final String DATA_PATH = "./test/crosslanguagefiles";
/**
* Write `data` to `path` using UTF-8 as binary encoding format.
*/
public static void writeToFile(String path, String data) throws IOException {
writeToFile(path, data.getBytes(Charset.forName("UTF-8")));
}
/**
* Returns the path to use for data files.
*
* @param filename The name of the file to include in the path.
* @return The data file path.
*/
public static String getPath(String filename) {
return DATA_PATH + "/" + filename;
}
public static byte[] readFile(String path) throws IOException {
return Files.readAllBytes(Paths.get(path));
}
} |
Removed | public static void writeToFile(String path, byte[] data) throws IOException {
String tmpPath = path + ".tmp";
try (FileOutputStream stream = new FileOutputStream(tmpPath)) {
stream.write(data);
stream.flush();
}
Files.move(FileSystems.getDefault().getPath(tmpPath), FileSystems.getDefault().getPath(path), ATOMIC_MOVE);
} | stream.flush(); | public static void writeToFile(String path, byte[] data) throws IOException {
String tmpPath = path + ".tmp"; // stage into a sibling temp file first
// try-with-resources closes (and thereby flushes) the stream before the move
try (FileOutputStream stream = new FileOutputStream(tmpPath)) {
stream.write(data);
}
// Atomic rename: readers see either the old file or the complete new one
Files.move(FileSystems.getDefault().getPath(tmpPath), FileSystems.getDefault().getPath(path), ATOMIC_MOVE);
} | class TestFileUtil {
protected static final String DATA_PATH = "./test/crosslanguagefiles";
/**
* Write `data` to `path` using UTF-8 as binary encoding format.
*/
public static void writeToFile(String path, String data) throws IOException {
writeToFile(path, data.getBytes(Charset.forName("UTF-8")));
}
/**
* Returns the path to use for data files.
*
* @param filename The name of the file to include in the path.
* @return The data file path.
*/
public static String getPath(String filename) {
return DATA_PATH + "/" + filename;
}
public static byte[] readFile(String path) throws IOException {
return Files.readAllBytes(Paths.get(path));
}
} | class TestFileUtil {
protected static final String DATA_PATH = "./test/crosslanguagefiles";
/**
* Write `data` to `path` using UTF-8 as binary encoding format.
*/
public static void writeToFile(String path, String data) throws IOException {
writeToFile(path, data.getBytes(Charset.forName("UTF-8")));
}
/**
* Returns the path to use for data files.
*
* @param filename The name of the file to include in the path.
* @return The data file path.
*/
public static String getPath(String filename) {
return DATA_PATH + "/" + filename;
}
public static byte[] readFile(String path) throws IOException {
return Files.readAllBytes(Paths.get(path));
}
} |
This can be removed once #5497 has been released. | public void setNodeState(String hostName, Node.State nodeState) {
// TODO: temporary mapping — 'ready' is sent as the legacy "availablefornewallocations"
// state name; remove once the config server accepts "ready" directly (see #5497).
String state = nodeState == Node.State.ready ? "availablefornewallocations" : nodeState.name();
NodeMessageResponse response = configServerApi.put(
"/nodes/v2/state/" + state + "/" + hostName,
Optional.empty(), /* body */
NodeMessageResponse.class);
NODE_ADMIN_LOGGER.info(response.message);
// An absent or empty errorCode means the state change was accepted
if (response.errorCode == null || response.errorCode.isEmpty()) {
return;
}
throw new RuntimeException("Unexpected message " + response.message + " " + response.errorCode);
} | String state = nodeState == Node.State.ready ? "availablefornewallocations" : nodeState.name(); | public void setNodeState(String hostName, Node.State nodeState) {
// TODO: temporary mapping — 'ready' is sent as the legacy "availablefornewallocations"
// state name; remove once the config server accepts "ready" directly (see #5497).
String state = nodeState == Node.State.ready ? "availablefornewallocations" : nodeState.name();
NodeMessageResponse response = configServerApi.put(
"/nodes/v2/state/" + state + "/" + hostName,
Optional.empty(), /* body */
NodeMessageResponse.class);
NODE_ADMIN_LOGGER.info(response.message);
// An absent or empty errorCode means the state change was accepted
if (response.errorCode == null || response.errorCode.isEmpty()) {
return;
}
throw new RuntimeException("Unexpected message " + response.message + " " + response.errorCode);
} | class RealNodeRepository implements NodeRepository {
private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(RealNodeRepository.class);
private final ConfigServerApi configServerApi;
public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
}
@Override
public List<NodeRepositoryNode> getNodes(String baseHostName) {
return getNodes(Optional.of(baseHostName), Collections.emptyList());
}
@Override
public List<NodeRepositoryNode> getNodes(NodeType... nodeTypes) {
if (nodeTypes.length == 0)
throw new IllegalArgumentException("Must specify at least 1 node type");
return getNodes(Optional.empty(), Arrays.asList(nodeTypes));
}
private List<NodeRepositoryNode> getNodes(Optional<String> baseHostName, List<NodeType> nodeTypeList) {
Optional<String> nodeTypes = Optional
.of(nodeTypeList.stream().map(NodeType::name).collect(Collectors.joining(",")))
.filter(StringUtils::isNotEmpty);
String path = "/nodes/v2/node/?recursive=true" +
baseHostName.map(base -> "&parentHost=" + base).orElse("") +
nodeTypes.map(types -> "&type=" + types).orElse("");
final GetNodesResponse nodesForHost = configServerApi.get(path, GetNodesResponse.class);
return nodesForHost.nodes.stream()
.map(RealNodeRepository::createNodeRepositoryNode)
.collect(Collectors.toList());
}
@Override
public Optional<NodeRepositoryNode> getNode(String hostName) {
try {
GetNodesResponse.Node nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName,
GetNodesResponse.Node.class);
if (nodeResponse == null) {
return Optional.empty();
}
return Optional.of(createNodeRepositoryNode(nodeResponse));
} catch (HttpException.NotFoundException|HttpException.ForbiddenException e) {
return Optional.empty();
}
}
@Override
public List<NodeAcl> getNodeAcl(String hostName) {
try {
final String path = String.format("/nodes/v2/acl/%s?children=true", hostName);
final GetAclResponse response = configServerApi.get(path, GetAclResponse.class);
return response.trustedNodes.stream()
.map(node -> new NodeAcl(
node.hostname, node.ipAddress, ContainerName.fromHostname(node.trustedBy)))
.collect(Collectors.toList());
} catch (HttpException.NotFoundException e) {
return Collections.emptyList();
}
}
private static NodeRepositoryNode createNodeRepositoryNode(GetNodesResponse.Node node)
throws IllegalArgumentException, NullPointerException {
Objects.requireNonNull(node.nodeType, "Unknown node type");
NodeType nodeType = NodeType.valueOf(node.nodeType);
Objects.requireNonNull(node.nodeState, "Unknown node state");
Node.State nodeState = Node.State.valueOf(node.nodeState);
if (nodeState == Node.State.active) {
Objects.requireNonNull(node.wantedVespaVersion, "Unknown vespa version for active node");
Objects.requireNonNull(node.wantedDockerImage, "Unknown docker image for active node");
Objects.requireNonNull(node.wantedRestartGeneration, "Unknown wantedRestartGeneration for active node");
Objects.requireNonNull(node.currentRestartGeneration, "Unknown currentRestartGeneration for active node");
}
String hostName = Objects.requireNonNull(node.hostname, "hostname is null");
NodeRepositoryNode.Owner owner = null;
if (node.owner != null) {
owner = new NodeRepositoryNode.Owner(node.owner.tenant, node.owner.application, node.owner.instance);
}
NodeRepositoryNode.Membership membership = null;
if (node.membership != null) {
membership = new NodeRepositoryNode.Membership(node.membership.clusterType, node.membership.clusterId,
node.membership.group, node.membership.index, node.membership.retired);
}
return new NodeRepositoryNode(
hostName,
Optional.ofNullable(node.wantedDockerImage).map(DockerImage::new),
Optional.ofNullable(node.currentDockerImage).map(DockerImage::new),
nodeState,
nodeType,
node.nodeFlavor,
node.nodeCanonicalFlavor,
Optional.ofNullable(node.wantedVespaVersion),
Optional.ofNullable(node.vespaVersion),
Optional.ofNullable(node.allowedToBeDown),
Optional.ofNullable(owner),
Optional.ofNullable(membership),
Optional.ofNullable(node.wantedRestartGeneration),
Optional.ofNullable(node.currentRestartGeneration),
node.wantedRebootGeneration,
node.currentRebootGeneration,
node.minCpuCores,
node.minMainMemoryAvailableGb,
node.minDiskAvailableGb,
node.fastDisk,
node.ipAddresses,
Optional.ofNullable(node.hardwareDivergence),
Optional.ofNullable(node.parentHostname));
}
@Override
public void updateNodeAttributes(final String hostName, final NodeAttributes nodeAttributes) {
UpdateNodeAttributesResponse response = configServerApi.patch(
"/nodes/v2/node/" + hostName,
new UpdateNodeAttributesRequestBody(nodeAttributes),
UpdateNodeAttributesResponse.class);
if (response.errorCode == null || response.errorCode.isEmpty()) {
return;
}
throw new RuntimeException("Unexpected message " + response.message + " " + response.errorCode);
}
@Override
} | class RealNodeRepository implements NodeRepository {
private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(RealNodeRepository.class);
private final ConfigServerApi configServerApi;
public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
}
@Override
public List<NodeSpec> getNodes(String baseHostName) {
return getNodes(Optional.of(baseHostName), Collections.emptyList());
}
@Override
public List<NodeSpec> getNodes(NodeType... nodeTypes) {
if (nodeTypes.length == 0)
throw new IllegalArgumentException("Must specify at least 1 node type");
return getNodes(Optional.empty(), Arrays.asList(nodeTypes));
}
private List<NodeSpec> getNodes(Optional<String> baseHostName, List<NodeType> nodeTypeList) {
Optional<String> nodeTypes = Optional
.of(nodeTypeList.stream().map(NodeType::name).collect(Collectors.joining(",")))
.filter(StringUtils::isNotEmpty);
String path = "/nodes/v2/node/?recursive=true" +
baseHostName.map(base -> "&parentHost=" + base).orElse("") +
nodeTypes.map(types -> "&type=" + types).orElse("");
final GetNodesResponse nodesForHost = configServerApi.get(path, GetNodesResponse.class);
return nodesForHost.nodes.stream()
.map(RealNodeRepository::createNodeRepositoryNode)
.collect(Collectors.toList());
}
@Override
public Optional<NodeSpec> getNode(String hostName) {
try {
GetNodesResponse.Node nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName,
GetNodesResponse.Node.class);
if (nodeResponse == null) {
return Optional.empty();
}
return Optional.of(createNodeRepositoryNode(nodeResponse));
} catch (HttpException.NotFoundException|HttpException.ForbiddenException e) {
return Optional.empty();
}
}
@Override
public List<AclSpec> getNodesAcl(String hostName) {
try {
final String path = String.format("/nodes/v2/acl/%s?children=true", hostName);
final GetAclResponse response = configServerApi.get(path, GetAclResponse.class);
return response.trustedNodes.stream()
.map(node -> new AclSpec(
node.hostname, node.ipAddress, ContainerName.fromHostname(node.trustedBy)))
.collect(Collectors.toList());
} catch (HttpException.NotFoundException e) {
return Collections.emptyList();
}
}
private static NodeSpec createNodeRepositoryNode(GetNodesResponse.Node node)
throws IllegalArgumentException, NullPointerException {
Objects.requireNonNull(node.nodeType, "Unknown node type");
NodeType nodeType = NodeType.valueOf(node.nodeType);
Objects.requireNonNull(node.nodeState, "Unknown node state");
Node.State nodeState = Node.State.valueOf(node.nodeState);
if (nodeState == Node.State.active) {
Objects.requireNonNull(node.wantedVespaVersion, "Unknown vespa version for active node");
Objects.requireNonNull(node.wantedDockerImage, "Unknown docker image for active node");
Objects.requireNonNull(node.wantedRestartGeneration, "Unknown wantedRestartGeneration for active node");
Objects.requireNonNull(node.currentRestartGeneration, "Unknown currentRestartGeneration for active node");
}
String hostName = Objects.requireNonNull(node.hostname, "hostname is null");
NodeSpec.Owner owner = null;
if (node.owner != null) {
owner = new NodeSpec.Owner(node.owner.tenant, node.owner.application, node.owner.instance);
}
NodeSpec.Membership membership = null;
if (node.membership != null) {
membership = new NodeSpec.Membership(node.membership.clusterType, node.membership.clusterId,
node.membership.group, node.membership.index, node.membership.retired);
}
return new NodeSpec(
hostName,
Optional.ofNullable(node.wantedDockerImage).map(DockerImage::new),
Optional.ofNullable(node.currentDockerImage).map(DockerImage::new),
nodeState,
nodeType,
node.nodeFlavor,
node.nodeCanonicalFlavor,
Optional.ofNullable(node.wantedVespaVersion),
Optional.ofNullable(node.vespaVersion),
Optional.ofNullable(node.allowedToBeDown),
Optional.ofNullable(owner),
Optional.ofNullable(membership),
Optional.ofNullable(node.wantedRestartGeneration),
Optional.ofNullable(node.currentRestartGeneration),
node.wantedRebootGeneration,
node.currentRebootGeneration,
node.minCpuCores,
node.minMainMemoryAvailableGb,
node.minDiskAvailableGb,
node.fastDisk,
node.ipAddresses,
Optional.ofNullable(node.hardwareDivergence),
Optional.ofNullable(node.parentHostname));
}
@Override
public void updateNodeAttributes(final String hostName, final NodeAttributes nodeAttributes) {
UpdateNodeAttributesResponse response = configServerApi.patch(
"/nodes/v2/node/" + hostName,
new UpdateNodeAttributesRequestBody(nodeAttributes),
UpdateNodeAttributesResponse.class);
if (response.errorCode == null || response.errorCode.isEmpty()) {
return;
}
throw new RuntimeException("Unexpected message " + response.message + " " + response.errorCode);
}
@Override
} |
/**
 * @param trustedPorts ports this node should trust, or null for none
 * @param trustedNodes addresses of other nodes this node should trust, or null for none
 */
public Acl(List<Integer> trustedPorts, List<InetAddress> trustedNodes) {
    // Use an immutable empty list (not a fresh mutable ArrayList) when nothing is given,
    // so both fields are immutable regardless of which branch is taken. ImmutableList.of()
    // is already in scope here, avoiding a new import.
    this.trustedNodes = trustedNodes != null ? ImmutableList.copyOf(trustedNodes) : ImmutableList.of();
    this.trustedPorts = trustedPorts != null ? ImmutableList.copyOf(trustedPorts) : ImmutableList.of();
}
// Empty defaults use the immutable Collections.emptyList(), so both fields stay immutable
this.trustedNodes = trustedNodes != null ? ImmutableList.copyOf(trustedNodes) : Collections.emptyList();
this.trustedPorts = trustedPorts != null ? ImmutableList.copyOf(trustedPorts) : Collections.emptyList();
} | class Acl {
private final List<InetAddress> trustedNodes;
private final List<Integer> trustedPorts;
public List<InetAddress> trustedNodes() {
return trustedNodes;
}
public List<Integer> trustedPorts() {
return trustedPorts;
}
public String toRestoreCommand(InetAddress containerAddress) {
return String.join("\n"
, "*filter"
, toListRules(containerAddress)
, "COMMIT\n");
}
public String toListRules(InetAddress containerAddress) {
IPVersion ipVersion = IPVersion.get(containerAddress);
String basics = String.join("\n"
, "-P INPUT ACCEPT"
, "-P FORWARD ACCEPT"
, "-P OUTPUT ACCEPT"
, "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT"
, "-A INPUT -i lo -j ACCEPT"
, "-A INPUT -p " + ipVersion.icmpProtocol() + " -j ACCEPT");
String ports = trustedPorts.stream()
.map(port -> "-A INPUT -p tcp --dport " + port + " -j ACCEPT")
.collect(Collectors.joining("\n"));
String nodes = trustedNodes.stream()
.filter(ipVersion::match)
.map(ipAddress -> "-A INPUT -s " + InetAddresses.toAddrString(ipAddress) + ipVersion.singleHostCidr() + " -j ACCEPT")
.collect(Collectors.joining("\n"));
String rejectEverythingElse = "-A INPUT -j REJECT";
String redirectSelf = "-A OUTPUT -d " + InetAddresses.toAddrString(containerAddress) + " -j REDIRECT";
return String.join("\n", basics, ports, nodes, rejectEverythingElse, redirectSelf);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Acl that = (Acl) o;
return Objects.equals(trustedPorts, that.trustedPorts) &&
Objects.equals(trustedNodes, that.trustedNodes);
}
@Override
public int hashCode() {
return Objects.hash(trustedPorts, trustedNodes);
}
} | class Acl {
private final List<InetAddress> trustedNodes;
private final List<Integer> trustedPorts;
/**
* @param trustedPorts Ports that hostname should trust
* @param trustedNodes Other hostnames that this hostname should trust
*/
public String toRules(IPVersion ipVersion) {
String basics = String.join("\n"
, "-P INPUT ACCEPT"
, "-P FORWARD ACCEPT"
, "-P OUTPUT ACCEPT"
, "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT"
, "-A INPUT -i lo -j ACCEPT"
, "-A INPUT -p " + ipVersion.icmpProtocol() + " -j ACCEPT");
String commaSeparatedPorts = trustedPorts.stream().map(i -> Integer.toString(i)).collect(Collectors.joining(","));
String ports = commaSeparatedPorts.isEmpty() ? "" : "-A INPUT -p tcp -m multiport --dports " + commaSeparatedPorts + " -j ACCEPT\n";
String nodes = trustedNodes.stream()
.filter(ipVersion::match)
.map(ipAddress -> "-A INPUT -s " + InetAddresses.toAddrString(ipAddress) + ipVersion.singleHostCidr() + " -j ACCEPT")
.collect(Collectors.joining("\n"));
String rejectEverythingElse = "-A INPUT -j REJECT --reject-with " + ipVersion.icmpPortUnreachable();
return basics + "\n" + ports + nodes + "\n" + rejectEverythingElse;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Acl that = (Acl) o;
return Objects.equals(trustedPorts, that.trustedPorts) &&
Objects.equals(trustedNodes, that.trustedNodes);
}
@Override
public int hashCode() {
return Objects.hash(trustedPorts, trustedNodes);
}
} |
Looks like we call `nodeRepository.getNodeAcls(node, aclsForChildren)` 3 times now, call it once and reuse result? | private void toSlime(String hostname, Cursor object) {
Node node = nodeRepository.getNode(hostname)
.orElseGet(() -> nodeRepository.getConfigNode(hostname)
.orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")));
Cursor trustedNodesArray = object.setArray("trustedNodes");
nodeRepository.getNodeAcls(node, aclsForChildren).forEach(nodeAcl -> toSlime(nodeAcl, trustedNodesArray));
Cursor trustedNetworksArray = object.setArray("trustedNetworks");
nodeRepository.getNodeAcls(node, aclsForChildren).forEach(nodeAcl -> toSlime(nodeAcl.trustedNetworks(),
nodeAcl.node(),
trustedNetworksArray));
Cursor trustedPortsArray = object.setArray("trustedPorts");
nodeRepository.getNodeAcls(node, aclsForChildren).forEach(nodeAcl -> toSlime(nodeAcl.trustedPorts(),
nodeAcl,
trustedPortsArray));
} | nodeRepository.getNodeAcls(node, aclsForChildren).forEach(nodeAcl -> toSlime(nodeAcl.trustedPorts(), | private void toSlime(String hostname, Cursor object) {
// Resolve the node; fall back to config nodes, else 404
Node node = nodeRepository.getNode(hostname)
.orElseGet(() -> nodeRepository.getConfigNode(hostname)
.orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")));
// Fetch the ACLs once and reuse for all three output arrays
List<NodeAcl> acls = nodeRepository.getNodeAcls(node, aclsForChildren);
Cursor trustedNodesArray = object.setArray("trustedNodes");
acls.forEach(nodeAcl -> toSlime(nodeAcl, trustedNodesArray));
Cursor trustedNetworksArray = object.setArray("trustedNetworks");
acls.forEach(nodeAcl -> toSlime(nodeAcl.trustedNetworks(), nodeAcl.node(), trustedNetworksArray));
Cursor trustedPortsArray = object.setArray("trustedPorts");
acls.forEach(nodeAcl -> toSlime(nodeAcl.trustedPorts(), nodeAcl, trustedPortsArray));
} | class NodeAclResponse extends HttpResponse {
private static final String CHILDREN_REQUEST_PROPERTY = "children";
private final NodeRepository nodeRepository;
private final Slime slime;
private final boolean aclsForChildren;
public NodeAclResponse(HttpRequest request, NodeRepository nodeRepository) {
super(200);
this.nodeRepository = nodeRepository;
this.slime = new Slime();
this.aclsForChildren = request.getBooleanProperty(CHILDREN_REQUEST_PROPERTY);
final Cursor root = slime.setObject();
final String hostname = baseName(request.getUri().getPath());
toSlime(hostname, root);
}
private void toSlime(NodeAcl nodeAcl, Cursor array) {
nodeAcl.trustedNodes().forEach(node -> node.ipAddresses().forEach(ipAddress -> {
Cursor object = array.addObject();
object.setString("hostname", node.hostname());
object.setString("type", node.type().name());
object.setString("ipAddress", ipAddress);
object.setString("trustedBy", nodeAcl.node().hostname());
}));
}
private void toSlime(Set<String> trustedNetworks, Node trustedby, Cursor array) {
trustedNetworks.forEach(network -> {
Cursor object = array.addObject();
object.setString("network", network);
object.setString("trustedBy", trustedby.hostname());
});
}
private void toSlime(Set<Integer> trustedPorts, NodeAcl trustedBy, Cursor array) {
trustedPorts.forEach(port -> {
Cursor object = array.addObject();
object.setLong("port", port);
object.setString("trustedBy", trustedBy.node().hostname());
});
}
@Override
public void render(OutputStream outputStream) throws IOException {
outputStream.write(SlimeUtils.toJsonBytes(slime));
}
@Override
public String getContentType() {
return "application/json";
}
private static String baseName(String path) {
return new File(path).getName();
}
} | class NodeAclResponse extends HttpResponse {
private static final String CHILDREN_REQUEST_PROPERTY = "children";
private final NodeRepository nodeRepository;
private final Slime slime;
private final boolean aclsForChildren;
public NodeAclResponse(HttpRequest request, NodeRepository nodeRepository) {
super(200);
this.nodeRepository = nodeRepository;
this.slime = new Slime();
this.aclsForChildren = request.getBooleanProperty(CHILDREN_REQUEST_PROPERTY);
final Cursor root = slime.setObject();
final String hostname = baseName(request.getUri().getPath());
toSlime(hostname, root);
}
private void toSlime(NodeAcl nodeAcl, Cursor array) {
nodeAcl.trustedNodes().forEach(node -> node.ipAddresses().forEach(ipAddress -> {
Cursor object = array.addObject();
object.setString("hostname", node.hostname());
object.setString("type", node.type().name());
object.setString("ipAddress", ipAddress);
object.setString("trustedBy", nodeAcl.node().hostname());
}));
}
private void toSlime(Set<String> trustedNetworks, Node trustedby, Cursor array) {
trustedNetworks.forEach(network -> {
Cursor object = array.addObject();
object.setString("network", network);
object.setString("trustedBy", trustedby.hostname());
});
}
private void toSlime(Set<Integer> trustedPorts, NodeAcl trustedBy, Cursor array) {
trustedPorts.forEach(port -> {
Cursor object = array.addObject();
object.setLong("port", port);
object.setString("trustedBy", trustedBy.node().hostname());
});
}
@Override
public void render(OutputStream outputStream) throws IOException {
outputStream.write(SlimeUtils.toJsonBytes(slime));
}
@Override
public String getContentType() {
return "application/json";
}
private static String baseName(String path) {
return new File(path).getName();
}
} |
Fixed, PTAL. | public boolean converge(TaskContext context) {
List<String> lines = uncheck(() -> Files.readAllLines(path, ENCODING));
List<String> newLines = new ArrayList<>();
StringBuilder diff = new StringBuilder();
boolean modified = false;
for (int i = 0; i < lines.size(); i++) {
String line = lines.get(i);
LineEdit edit = editor.edit(line);
switch (edit.getType()) {
case REMOVE:
modified = true;
maybeRemove(diff, line);
break;
case REPLACE: {
modified = true;
String replacementLine = edit.replacementLine();
newLines.add(replacementLine);
maybeRemove(diff, line);
maybeAdd(diff, replacementLine);
break;
}
case INSERT: {
modified = true;
String replacementLine = edit.replacementLine();
newLines.add(replacementLine);
maybeAdd(diff, replacementLine);
i--;
break;
}
case NONE:
newLines.add(line);
break;
default: throw new IllegalArgumentException("Unknown EditType " + edit.getType());
}
}
List<String> linesToAppend = editor.onComplete();
if (!linesToAppend.isEmpty()) {
modified = true;
newLines.addAll(linesToAppend);
linesToAppend.forEach(line -> maybeAdd(diff, line));
}
if (!modified) {
return false;
}
String diffDescription = diffTooLarge(diff) ? "" : ":\n" + diff.toString();
context.recordSystemModification(logger, "Patching file " + path + diffDescription);
uncheck(() -> Files.write(path, newLines, ENCODING));
return true;
} | i--; | public boolean converge(TaskContext context) {
List<String> lines = uncheck(() -> Files.readAllLines(path, ENCODING));
List<String> newLines = new ArrayList<>();
StringBuilder diff = new StringBuilder();
boolean modified = false;
for (String line : lines) {
LineEdit edit = editor.edit(line);
if (!edit.prependLines().isEmpty()) {
modified = true;
maybeAdd(diff, edit.prependLines());
newLines.addAll(edit.prependLines());
}
switch (edit.getType()) {
case REPLACE:
modified = true;
maybeRemove(diff, line);
break;
case NONE:
newLines.add(line);
break;
default: throw new IllegalArgumentException("Unknown EditType " + edit.getType());
}
if (!edit.appendLines().isEmpty()) {
modified = true;
maybeAdd(diff, edit.appendLines());
newLines.addAll(edit.appendLines());
}
}
List<String> linesToAppend = editor.onComplete();
if (!linesToAppend.isEmpty()) {
modified = true;
newLines.addAll(linesToAppend);
maybeAdd(diff, linesToAppend);
}
if (!modified) {
return false;
}
String diffDescription = diffTooLarge(diff) ? "" : ":\n" + diff.toString();
context.recordSystemModification(logger, "Patching file " + path + diffDescription);
uncheck(() -> Files.write(path, newLines, ENCODING));
return true;
} | class Editor {
private static final Logger logger = Logger.getLogger(Editor.class.getName());
private static final Charset ENCODING = StandardCharsets.UTF_8;
private static int maxLength = 300;
private final Path path;
private final LineEditor editor;
public Editor(Path path, LineEditor editor) {
this.path = path;
this.editor = editor;
}
/**
* Read the file which must be encoded in UTF-8, use the LineEditor to edit it,
* and any modifications were done write it back and return true.
*/
private static void maybeAdd(StringBuilder diff, String line) {
if (!diffTooLarge(diff)) {
diff.append('+').append(line).append('\n');
}
}
private static void maybeRemove(StringBuilder diff, String line) {
if (!diffTooLarge(diff)) {
diff.append('-').append(line).append('\n');
}
}
private static boolean diffTooLarge(StringBuilder diff) {
return diff.length() > maxLength;
}
} | class Editor {
private static final Logger logger = Logger.getLogger(Editor.class.getName());
private static final Charset ENCODING = StandardCharsets.UTF_8;
private static int maxLength = 300;
private final Path path;
private final LineEditor editor;
public Editor(Path path, LineEditor editor) {
this.path = path;
this.editor = editor;
}
/**
* Read the file which must be encoded in UTF-8, use the LineEditor to edit it,
* and any modifications were done write it back and return true.
*/
private static void maybeAdd(StringBuilder diff, List<String> lines) {
for (String line : lines) {
if (!diffTooLarge(diff)) {
diff.append('+').append(line).append('\n');
}
}
}
private static void maybeRemove(StringBuilder diff, String line) {
if (!diffTooLarge(diff)) {
diff.append('-').append(line).append('\n');
}
}
private static boolean diffTooLarge(StringBuilder diff) {
return diff.length() > maxLength;
}
} |
Fixed | public Acl(List<Integer> trustedPorts, List<InetAddress> trustedNodes) {
this.trustedNodes = trustedNodes != null ? ImmutableList.copyOf(trustedNodes) : new ArrayList<>();
this.trustedPorts = trustedPorts != null ? ImmutableList.copyOf(trustedPorts) : new ArrayList<>();
} | this.trustedNodes = trustedNodes != null ? ImmutableList.copyOf(trustedNodes) : new ArrayList<>(); | public Acl(List<Integer> trustedPorts, List<InetAddress> trustedNodes) {
this.trustedNodes = trustedNodes != null ? ImmutableList.copyOf(trustedNodes) : Collections.emptyList();
this.trustedPorts = trustedPorts != null ? ImmutableList.copyOf(trustedPorts) : Collections.emptyList();
} | class Acl {
private final List<InetAddress> trustedNodes;
private final List<Integer> trustedPorts;
public List<InetAddress> trustedNodes() {
return trustedNodes;
}
public List<Integer> trustedPorts() {
return trustedPorts;
}
public String toRestoreCommand(InetAddress containerAddress) {
return String.join("\n"
, "*filter"
, toListRules(containerAddress)
, "COMMIT\n");
}
public String toListRules(InetAddress containerAddress) {
IPVersion ipVersion = IPVersion.get(containerAddress);
String basics = String.join("\n"
, "-P INPUT ACCEPT"
, "-P FORWARD ACCEPT"
, "-P OUTPUT ACCEPT"
, "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT"
, "-A INPUT -i lo -j ACCEPT"
, "-A INPUT -p " + ipVersion.icmpProtocol() + " -j ACCEPT");
String ports = trustedPorts.stream()
.map(port -> "-A INPUT -p tcp --dport " + port + " -j ACCEPT")
.collect(Collectors.joining("\n"));
String nodes = trustedNodes.stream()
.filter(ipVersion::match)
.map(ipAddress -> "-A INPUT -s " + InetAddresses.toAddrString(ipAddress) + ipVersion.singleHostCidr() + " -j ACCEPT")
.collect(Collectors.joining("\n"));
String rejectEverythingElse = "-A INPUT -j REJECT";
String redirectSelf = "-A OUTPUT -d " + InetAddresses.toAddrString(containerAddress) + " -j REDIRECT";
return String.join("\n", basics, ports, nodes, rejectEverythingElse, redirectSelf);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Acl that = (Acl) o;
return Objects.equals(trustedPorts, that.trustedPorts) &&
Objects.equals(trustedNodes, that.trustedNodes);
}
@Override
public int hashCode() {
return Objects.hash(trustedPorts, trustedNodes);
}
} | class Acl {
private final List<InetAddress> trustedNodes;
private final List<Integer> trustedPorts;
/**
* @param trustedPorts Ports that hostname should trust
* @param trustedNodes Other hostnames that this hostname should trust
*/
public String toRules(IPVersion ipVersion) {
String basics = String.join("\n"
, "-P INPUT ACCEPT"
, "-P FORWARD ACCEPT"
, "-P OUTPUT ACCEPT"
, "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT"
, "-A INPUT -i lo -j ACCEPT"
, "-A INPUT -p " + ipVersion.icmpProtocol() + " -j ACCEPT");
String commaSeparatedPorts = trustedPorts.stream().map(i -> Integer.toString(i)).collect(Collectors.joining(","));
String ports = commaSeparatedPorts.isEmpty() ? "" : "-A INPUT -p tcp -m multiport --dports " + commaSeparatedPorts + " -j ACCEPT\n";
String nodes = trustedNodes.stream()
.filter(ipVersion::match)
.map(ipAddress -> "-A INPUT -s " + InetAddresses.toAddrString(ipAddress) + ipVersion.singleHostCidr() + " -j ACCEPT")
.collect(Collectors.joining("\n"));
String rejectEverythingElse = "-A INPUT -j REJECT --reject-with " + ipVersion.icmpPortUnreachable();
return basics + "\n" + ports + nodes + "\n" + rejectEverythingElse;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Acl that = (Acl) o;
return Objects.equals(trustedPorts, that.trustedPorts) &&
Objects.equals(trustedNodes, that.trustedNodes);
}
@Override
public int hashCode() {
return Objects.hash(trustedPorts, trustedNodes);
}
} |
One time it is | private void toSlime(String hostname, Cursor object) {
Node node = nodeRepository.getNode(hostname)
.orElseGet(() -> nodeRepository.getConfigNode(hostname)
.orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")));
Cursor trustedNodesArray = object.setArray("trustedNodes");
nodeRepository.getNodeAcls(node, aclsForChildren).forEach(nodeAcl -> toSlime(nodeAcl, trustedNodesArray));
Cursor trustedNetworksArray = object.setArray("trustedNetworks");
nodeRepository.getNodeAcls(node, aclsForChildren).forEach(nodeAcl -> toSlime(nodeAcl.trustedNetworks(),
nodeAcl.node(),
trustedNetworksArray));
Cursor trustedPortsArray = object.setArray("trustedPorts");
nodeRepository.getNodeAcls(node, aclsForChildren).forEach(nodeAcl -> toSlime(nodeAcl.trustedPorts(),
nodeAcl,
trustedPortsArray));
} | nodeRepository.getNodeAcls(node, aclsForChildren).forEach(nodeAcl -> toSlime(nodeAcl.trustedPorts(), | private void toSlime(String hostname, Cursor object) {
Node node = nodeRepository.getNode(hostname)
.orElseGet(() -> nodeRepository.getConfigNode(hostname)
.orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")));
List<NodeAcl> acls = nodeRepository.getNodeAcls(node, aclsForChildren);
Cursor trustedNodesArray = object.setArray("trustedNodes");
acls.forEach(nodeAcl -> toSlime(nodeAcl, trustedNodesArray));
Cursor trustedNetworksArray = object.setArray("trustedNetworks");
acls.forEach(nodeAcl -> toSlime(nodeAcl.trustedNetworks(), nodeAcl.node(), trustedNetworksArray));
Cursor trustedPortsArray = object.setArray("trustedPorts");
acls.forEach(nodeAcl -> toSlime(nodeAcl.trustedPorts(), nodeAcl, trustedPortsArray));
} | class NodeAclResponse extends HttpResponse {
private static final String CHILDREN_REQUEST_PROPERTY = "children";
private final NodeRepository nodeRepository;
private final Slime slime;
private final boolean aclsForChildren;
public NodeAclResponse(HttpRequest request, NodeRepository nodeRepository) {
super(200);
this.nodeRepository = nodeRepository;
this.slime = new Slime();
this.aclsForChildren = request.getBooleanProperty(CHILDREN_REQUEST_PROPERTY);
final Cursor root = slime.setObject();
final String hostname = baseName(request.getUri().getPath());
toSlime(hostname, root);
}
private void toSlime(NodeAcl nodeAcl, Cursor array) {
nodeAcl.trustedNodes().forEach(node -> node.ipAddresses().forEach(ipAddress -> {
Cursor object = array.addObject();
object.setString("hostname", node.hostname());
object.setString("type", node.type().name());
object.setString("ipAddress", ipAddress);
object.setString("trustedBy", nodeAcl.node().hostname());
}));
}
private void toSlime(Set<String> trustedNetworks, Node trustedby, Cursor array) {
trustedNetworks.forEach(network -> {
Cursor object = array.addObject();
object.setString("network", network);
object.setString("trustedBy", trustedby.hostname());
});
}
private void toSlime(Set<Integer> trustedPorts, NodeAcl trustedBy, Cursor array) {
trustedPorts.forEach(port -> {
Cursor object = array.addObject();
object.setLong("port", port);
object.setString("trustedBy", trustedBy.node().hostname());
});
}
@Override
public void render(OutputStream outputStream) throws IOException {
outputStream.write(SlimeUtils.toJsonBytes(slime));
}
@Override
public String getContentType() {
return "application/json";
}
private static String baseName(String path) {
return new File(path).getName();
}
} | class NodeAclResponse extends HttpResponse {
private static final String CHILDREN_REQUEST_PROPERTY = "children";
private final NodeRepository nodeRepository;
private final Slime slime;
private final boolean aclsForChildren;
public NodeAclResponse(HttpRequest request, NodeRepository nodeRepository) {
super(200);
this.nodeRepository = nodeRepository;
this.slime = new Slime();
this.aclsForChildren = request.getBooleanProperty(CHILDREN_REQUEST_PROPERTY);
final Cursor root = slime.setObject();
final String hostname = baseName(request.getUri().getPath());
toSlime(hostname, root);
}
private void toSlime(NodeAcl nodeAcl, Cursor array) {
nodeAcl.trustedNodes().forEach(node -> node.ipAddresses().forEach(ipAddress -> {
Cursor object = array.addObject();
object.setString("hostname", node.hostname());
object.setString("type", node.type().name());
object.setString("ipAddress", ipAddress);
object.setString("trustedBy", nodeAcl.node().hostname());
}));
}
private void toSlime(Set<String> trustedNetworks, Node trustedby, Cursor array) {
trustedNetworks.forEach(network -> {
Cursor object = array.addObject();
object.setString("network", network);
object.setString("trustedBy", trustedby.hostname());
});
}
private void toSlime(Set<Integer> trustedPorts, NodeAcl trustedBy, Cursor array) {
trustedPorts.forEach(port -> {
Cursor object = array.addObject();
object.setLong("port", port);
object.setString("trustedBy", trustedBy.node().hostname());
});
}
@Override
public void render(OutputStream outputStream) throws IOException {
outputStream.write(SlimeUtils.toJsonBytes(slime));
}
@Override
public String getContentType() {
return "application/json";
}
private static String baseName(String path) {
return new File(path).getName();
}
} |
This should work but forces the caller to keep some state to avoid making the same edit ad-infinitum. An alternative is to: - Make all edits to the current line (REPLACE, REMOVE, NONE) also allow the insertion of any number of lines prior to the currently edited line. | public boolean converge(TaskContext context) {
List<String> lines = uncheck(() -> Files.readAllLines(path, ENCODING));
List<String> newLines = new ArrayList<>();
StringBuilder diff = new StringBuilder();
boolean modified = false;
for (int i = 0; i < lines.size(); i++) {
String line = lines.get(i);
LineEdit edit = editor.edit(line);
switch (edit.getType()) {
case REMOVE:
modified = true;
maybeRemove(diff, line);
break;
case REPLACE: {
modified = true;
String replacementLine = edit.replacementLine();
newLines.add(replacementLine);
maybeRemove(diff, line);
maybeAdd(diff, replacementLine);
break;
}
case INSERT: {
modified = true;
String replacementLine = edit.replacementLine();
newLines.add(replacementLine);
maybeAdd(diff, replacementLine);
i--;
break;
}
case NONE:
newLines.add(line);
break;
default: throw new IllegalArgumentException("Unknown EditType " + edit.getType());
}
}
List<String> linesToAppend = editor.onComplete();
if (!linesToAppend.isEmpty()) {
modified = true;
newLines.addAll(linesToAppend);
linesToAppend.forEach(line -> maybeAdd(diff, line));
}
if (!modified) {
return false;
}
String diffDescription = diffTooLarge(diff) ? "" : ":\n" + diff.toString();
context.recordSystemModification(logger, "Patching file " + path + diffDescription);
uncheck(() -> Files.write(path, newLines, ENCODING));
return true;
} | i--; | public boolean converge(TaskContext context) {
List<String> lines = uncheck(() -> Files.readAllLines(path, ENCODING));
List<String> newLines = new ArrayList<>();
StringBuilder diff = new StringBuilder();
boolean modified = false;
for (String line : lines) {
LineEdit edit = editor.edit(line);
if (!edit.prependLines().isEmpty()) {
modified = true;
maybeAdd(diff, edit.prependLines());
newLines.addAll(edit.prependLines());
}
switch (edit.getType()) {
case REPLACE:
modified = true;
maybeRemove(diff, line);
break;
case NONE:
newLines.add(line);
break;
default: throw new IllegalArgumentException("Unknown EditType " + edit.getType());
}
if (!edit.appendLines().isEmpty()) {
modified = true;
maybeAdd(diff, edit.appendLines());
newLines.addAll(edit.appendLines());
}
}
List<String> linesToAppend = editor.onComplete();
if (!linesToAppend.isEmpty()) {
modified = true;
newLines.addAll(linesToAppend);
maybeAdd(diff, linesToAppend);
}
if (!modified) {
return false;
}
String diffDescription = diffTooLarge(diff) ? "" : ":\n" + diff.toString();
context.recordSystemModification(logger, "Patching file " + path + diffDescription);
uncheck(() -> Files.write(path, newLines, ENCODING));
return true;
} | class Editor {
private static final Logger logger = Logger.getLogger(Editor.class.getName());
private static final Charset ENCODING = StandardCharsets.UTF_8;
private static int maxLength = 300;
private final Path path;
private final LineEditor editor;
public Editor(Path path, LineEditor editor) {
this.path = path;
this.editor = editor;
}
/**
* Read the file which must be encoded in UTF-8, use the LineEditor to edit it,
* and any modifications were done write it back and return true.
*/
private static void maybeAdd(StringBuilder diff, String line) {
if (!diffTooLarge(diff)) {
diff.append('+').append(line).append('\n');
}
}
private static void maybeRemove(StringBuilder diff, String line) {
if (!diffTooLarge(diff)) {
diff.append('-').append(line).append('\n');
}
}
private static boolean diffTooLarge(StringBuilder diff) {
return diff.length() > maxLength;
}
} | class Editor {
private static final Logger logger = Logger.getLogger(Editor.class.getName());
private static final Charset ENCODING = StandardCharsets.UTF_8;
private static int maxLength = 300;
private final Path path;
private final LineEditor editor;
public Editor(Path path, LineEditor editor) {
this.path = path;
this.editor = editor;
}
/**
* Read the file which must be encoded in UTF-8, use the LineEditor to edit it,
* and any modifications were done write it back and return true.
*/
private static void maybeAdd(StringBuilder diff, List<String> lines) {
for (String line : lines) {
if (!diffTooLarge(diff)) {
diff.append('+').append(line).append('\n');
}
}
}
private static void maybeRemove(StringBuilder diff, String line) {
if (!diffTooLarge(diff)) {
diff.append('-').append(line).append('\n');
}
}
private static boolean diffTooLarge(StringBuilder diff) {
return diff.length() > maxLength;
}
} |
I think the excessive formatting makes this harder to read when the operators are "far away" from the references. | private Optional<Instant> completedAt(Application application, JobType jobType) {
Optional<Instant> lastSuccess = application.deploymentJobs().successAt(application.change(), jobType);
if (lastSuccess.isPresent() || ! jobType.isProduction())
return lastSuccess;
Deployment deployment = application.deployments().get(jobType.zone(controller.system()).get());
return Optional.ofNullable(deployment).map(Deployment::at)
.filter(ignored -> ! ( application.change().upgrades(deployment.version())
|| application.change().upgrades(deployment.applicationVersion()))
&& ( application.change().downgrades(deployment.version())
|| application.change().downgrades(deployment.applicationVersion())));
} | || application.change().downgrades(deployment.applicationVersion()))); | private Optional<Instant> completedAt(Application application, JobType jobType) {
Optional<Instant> lastSuccess = application.deploymentJobs().successAt(application.change(), jobType);
if (lastSuccess.isPresent() || ! jobType.isProduction())
return lastSuccess;
Deployment deployment = application.deployments().get(jobType.zone(controller.system()).get());
return Optional.ofNullable(deployment).map(Deployment::at)
.filter(ignored -> ! ( application.change().upgrades(deployment.version())
|| application.change().upgrades(deployment.applicationVersion()))
&& ( application.change().downgrades(deployment.version())
|| application.change().downgrades(deployment.applicationVersion())));
} | class DeploymentTrigger {
/**
* The max duration a job may run before we consider it dead/hanging
*/
private final Duration jobTimeout;
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.order = new DeploymentOrder(controller::system);
this.buildService = buildService;
this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
}
/**
* Returns the time in the past before which jobs are at this moment considered unresponsive
*/
public Instant jobTimeoutLimit() {
return clock.instant().minus(jobTimeout);
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*/
public void notifyOfCompletion(JobReport report) {
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(report.projectId());
if (report.jobType() == JobType.component && report.success()) {
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion));
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
applications().store(application);
});
}
/**
* Finds and triggers jobs that can and should run but are currently not.
*
* Only one job is triggered each run for test jobs, since those environments have limited capacity.
*/
public void triggerReadyJobs() {
computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
.entrySet().stream()
.flatMap(entry -> (entry.getKey()
? entry.getValue().stream()
.sorted(comparing(Job::isRetry)
.thenComparing(Job::applicationUpgrade)
.reversed()
.thenComparing(Job::availableSince))
.collect(groupingBy(Job::jobType))
: entry.getValue().stream()
.collect(groupingBy(Job::id)))
.values().stream()
.map(jobs -> (Runnable) jobs.stream()
.filter(job -> canTrigger(job) && trigger(job))
.limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
.parallel().forEach(Runnable::run);
}
/**
* Triggers the given job for the given application.
*/
public boolean trigger(Job job) {
log.info(String.format("Attempting to trigger %s for %s, deploying %s: %s", job.jobType, job.id, job.change, job.reason));
BuildService.BuildJob buildJob = new BuildService.BuildJob(job.projectId, job.jobType.jobName());
if (buildService.trigger(buildJob)) {
applications().lockOrThrow(job.id, application ->
applications().store(application.withJobTriggering(job.jobType,
clock.instant(),
application.deployVersionFor(job.jobType, controller),
application.deployApplicationVersionFor(job.jobType, controller, false)
.orElse(ApplicationVersion.unknown),
job.reason)));
return true;
}
log.log(LogLevel.WARNING, "Failed to trigger " + buildJob + " for " + job.id);
return false;
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already have an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/**
* Cancels any ongoing upgrade of the given application
*
* @param applicationId the application to trigger
*/
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
});
}
/**
* Finds the next step to trigger for the given application, if any, and triggers it
*/
public List<Job> computeReadyJobs(ApplicationId id) {
List<Job> jobs = new ArrayList<>();
applications().lockIfPresent(id, application -> {
List<DeploymentSpec.Step> steps = application.deploymentSpec().equals(DeploymentSpec.empty)
? Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test))
: application.deploymentSpec().steps();
Optional<Instant> completedAt = Optional.of(clock.instant());
String reason = "Deploying " + application.change();
for (DeploymentSpec.Step step : steps) {
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Set<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(application, job).isPresent()).collect(toSet());
if (remainingJobs.isEmpty()) {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = stepJobs.stream().map(job -> completedAt(application, job).get()).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
else if (completedAt.isPresent()) {
for (JobType job : remainingJobs)
jobs.add(new Job(application, job, reason, completedAt.get(), stepJobs));
completedAt = Optional.empty();
}
}
if (completedAt.isPresent())
applications().store(application.withChange(Change.empty()));
});
return jobs;
}
/**
* Returns the set of all jobs which have changes to propagate from the upstream steps, sorted by job.
*/
public Stream<Job> computeReadyJobs() {
return ApplicationList.from(applications().asList())
.notPullRequest()
.withProjectId()
.deploying()
.idList().stream()
.map(this::computeReadyJobs)
.flatMap(List::stream);
}
/**
* Returns the instant when the given application's current change was completed for the given job.
*
* Any job is complete if its current change was already successful on that job.
* A production job is also considered complete if its current change is strictly dominated by what
* is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
* part is a downgrade, regardless of the status of the job.
*/
private boolean canTrigger(Job job) {
Application application = applications().require(job.id);
if ( ! application.deploymentJobs().isDeployableTo(job.jobType.environment(), application.change()))
return false;
if (application.deploymentJobs().isRunning(job.jobType, jobTimeoutLimit()))
return false;
if ( ! job.jobType.isProduction())
return true;
if ( ! job.concurrentlyWith.containsAll(JobList.from(application)
.production()
.running(jobTimeoutLimit())
.mapToList(JobStatus::type)))
return false;
if (application.change().blockedBy(application.deploymentSpec(), clock.instant()))
return false;
return true;
}
private ApplicationController applications() {
return controller.applications();
}
private boolean acceptNewApplicationVersion(LockedApplication application) {
if ( ! application.change().isPresent()) return true;
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
if ( ! application.deploymentSpec().canUpgradeAt(clock.instant())
|| ! application.deploymentSpec().canChangeRevisionAt(clock.instant()))
return true;
return false;
}
public static class Job {
private final ApplicationId id;
private final JobType jobType;
private final long projectId;
private final Change change;
private final String reason;
private final Instant availableSince;
private final boolean retry;
private final Collection<JobType> concurrentlyWith;
public Job(Application application, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
this.id = application.id();
this.jobType = jobType;
this.projectId = application.deploymentJobs().projectId().get();
this.change = application.change();
this.availableSince = availableSince;
this.concurrentlyWith = concurrentlyWith;
JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent();
this.reason = retry ? "Retrying on out of capacity" : reason;
}
public ApplicationId id() { return id; }
public JobType jobType() { return jobType; }
public long projectId() { return projectId; }
public Change change() { return change; }
public String reason() { return reason; }
public Instant availableSince() { return availableSince; }
public boolean isRetry() { return retry; }
public boolean applicationUpgrade() { return change.application().isPresent(); }
}
} | class DeploymentTrigger {
/**
* The max duration a job may run before we consider it dead/hanging
*/
private final Duration jobTimeout;
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.order = new DeploymentOrder(controller::system);
this.buildService = buildService;
this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
}
/**
* Returns the time in the past before which jobs are at this moment considered unresponsive
*/
public Instant jobTimeoutLimit() {
return clock.instant().minus(jobTimeout);
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*/
public void notifyOfCompletion(JobReport report) {
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(report.projectId());
if (report.jobType() == JobType.component && report.success()) {
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion));
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
applications().store(application);
});
}
/**
 * Finds and triggers jobs that can and should run but are currently not.
 *
 * Only one job is triggered each run for test jobs, since those environments have limited capacity.
 */
public void triggerReadyJobs() {
    // Partition ready jobs: key true = test jobs (limited capacity), false = other jobs
    computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
                      .entrySet().stream()
                      .flatMap(entry -> (entry.getKey()
                              // Test jobs: order by priority — retries first, then application
                              // upgrades, then oldest — and group per job type so at most one
                              // job per type is triggered below
                              ? entry.getValue().stream()
                                     .sorted(comparing(Job::isRetry)
                                                     .thenComparing(Job::applicationUpgrade)
                                                     .reversed()
                                                     .thenComparing(Job::availableSince))
                                     .collect(groupingBy(Job::jobType))
                              // Other jobs: no capacity limit; group per application
                              : entry.getValue().stream()
                                     .collect(groupingBy(Job::id)))
                              .values().stream()
                              // The terminal ::count only forces evaluation of the lazy stream
                              // inside each Runnable; limit(1) caps test-job triggering per group
                              .map(jobs -> (Runnable) jobs.stream()
                                                          .filter(job -> canTrigger(job) && trigger(job))
                                                          .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                      .parallel().forEach(Runnable::run);
}
/**
 * Triggers the given job for the given application.
 *
 * @return true if the job was successfully submitted to the build service
 */
public boolean trigger(Job job) {
    log.info(String.format("Attempting to trigger %s for %s, deploying %s: %s", job.jobType, job.id, job.change, job.reason));
    BuildService.BuildJob buildJob = new BuildService.BuildJob(job.projectId, job.jobType.jobName());
    if (buildService.trigger(buildJob)) {
        // Record the triggering, including the platform and application versions it will deploy
        applications().lockOrThrow(job.id, application ->
                applications().store(application.withJobTriggering(job.jobType,
                                                                   clock.instant(),
                                                                   application.deployVersionFor(job.jobType, controller),
                                                                   application.deployApplicationVersionFor(job.jobType, controller, false)
                                                                              .orElse(ApplicationVersion.unknown),
                                                                   job.reason)));
        return true;
    }
    log.log(LogLevel.WARNING, "Failed to trigger " + buildJob + " for " + job.id);
    return false;
}
/**
 * Starts the given change for an application.
 *
 * @param applicationId the application to trigger
 * @param change the change to begin rolling out
 * @throws IllegalArgumentException if this application already has an ongoing change
 */
public void triggerChange(ApplicationId applicationId, Change change) {
    applications().lockOrThrow(applicationId, application -> {
        boolean changeAlreadyInProgress = application.change().isPresent()
                                          && ! application.deploymentJobs().hasFailures();
        if (changeAlreadyInProgress)
            throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                               application.change() + " is already in progress");
        LockedApplication updated = application.withChange(change);
        if (change.application().isPresent())
            updated = updated.withOutstandingChange(Change.empty());
        applications().store(updated);
    });
}
/**
 * Cancels any ongoing change of the given application.
 *
 * @param applicationId the application whose change to cancel
 * @param keepApplicationChange whether to retain the application (revision) part of the change
 */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
    applications().lockOrThrow(applicationId, application -> {
        Change remaining = Change.empty();
        if (keepApplicationChange && application.change().application().isPresent())
            remaining = Change.of(application.change().application().get());
        applications().store(application.withChange(remaining));
    });
}
/**
 * Computes the jobs which are ready to run for the given application, and clears its change
 * if all deployment steps have completed. (NOTE(review): despite the "compute" name, this
 * stores the application when the change is found complete.)
 */
public List<Job> computeReadyJobs(ApplicationId id) {
    List<Job> jobs = new ArrayList<>();
    applications().lockIfPresent(id, application -> {
        // An empty deployment spec implies a single implicit test step
        List<DeploymentSpec.Step> steps = application.deploymentSpec().equals(DeploymentSpec.empty)
                ? Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test))
                : application.deploymentSpec().steps();
        // completedAt is present while every step seen so far is complete; it then holds the
        // completion time of the last step, gating the triggering of the next one
        Optional<Instant> completedAt = Optional.of(clock.instant());
        String reason = "Deploying " + application.change();
        for (DeploymentSpec.Step step : steps) {
            Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
            Set<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(application, job).isPresent()).collect(toSet());
            if (remainingJobs.isEmpty()) {
                if (stepJobs.isEmpty()) {
                    // A delay step: complete only once the delay has elapsed
                    Duration delay = ((DeploymentSpec.Delay) step).duration();
                    completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                    reason += " after a delay of " + delay;
                }
                else {
                    // All jobs of this step are done; the step completed when the last job did
                    completedAt = stepJobs.stream().map(job -> completedAt(application, job).get()).max(naturalOrder());
                    reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                }
            }
            else if (completedAt.isPresent()) {
                // All previous steps are done: this step's remaining jobs are ready to run
                for (JobType job : remainingJobs)
                    jobs.add(new Job(application, job, reason, completedAt.get(), stepJobs));
                completedAt = Optional.empty();
            }
        }
        // Every step completed: the change has been fully rolled out
        if (completedAt.isPresent())
            applications().store(application.withChange(Change.empty()));
    });
    return jobs;
}
/** Returns all ready jobs over all applications which are currently deploying a change. */
public Stream<Job> computeReadyJobs() {
    List<ApplicationId> deployingApplications = ApplicationList.from(applications().asList())
                                                               .notPullRequest()
                                                               .withProjectId()
                                                               .deploying()
                                                               .idList();
    return deployingApplications.stream()
                                .flatMap(id -> computeReadyJobs(id).stream());
}
/**
 * Returns whether the given job may be triggered right now: its target environment must accept
 * the application's current change, and the job must not already be running. Production jobs
 * must additionally not conflict with other running production jobs, and must not fall in a
 * change-blocking window of the deployment spec.
 * (The previous javadoc here described a different method and has been replaced.)
 */
private boolean canTrigger(Job job) {
    Application application = applications().require(job.id);
    if ( ! application.deploymentJobs().isDeployableTo(job.jobType.environment(), application.change()))
        return false;
    if (application.deploymentJobs().isRunning(job.jobType, jobTimeoutLimit()))
        return false;
    if ( ! job.jobType.isProduction())
        return true;
    // Every currently running production job must be one this job is allowed to run concurrently with
    if ( ! job.concurrentlyWith.containsAll(JobList.from(application)
                                                   .production()
                                                   .running(jobTimeoutLimit())
                                                   .mapToList(JobStatus::type)))
        return false;
    if (application.change().blockedBy(application.deploymentSpec(), clock.instant()))
        return false;
    return true;
}
/** Convenience accessor for the application controller. */
private ApplicationController applications() {
    return controller.applications();
}
/**
 * Returns whether a newly built application version should be deployed immediately,
 * rather than stored as an outstanding change for later.
 */
private boolean acceptNewApplicationVersion(LockedApplication application) {
    // Accept when: nothing is in progress, an application change is already rolling out,
    // the current change is failing, or the change windows currently block changes anyway.
    return ! application.change().isPresent()
           || application.change().application().isPresent()
           || application.deploymentJobs().hasFailures()
           || ! application.deploymentSpec().canUpgradeAt(clock.instant())
           || ! application.deploymentSpec().canChangeRevisionAt(clock.instant());
}
/** Value object describing one triggerable job for an application, with its triggering context. */
public static class Job {
    private final ApplicationId id;
    private final JobType jobType;
    private final long projectId;
    private final Change change;
    private final String reason;
    // When this job first became ready; used to prioritise older jobs when triggering
    private final Instant availableSince;
    // Whether this is a retry after an out-of-capacity failure
    private final boolean retry;
    // The other jobs of the same deployment step, which this job may run concurrently with
    private final Collection<JobType> concurrentlyWith;
    public Job(Application application, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
        this.id = application.id();
        this.jobType = jobType;
        // Assumes the application has a project id — callers filter with withProjectId()
        this.projectId = application.deploymentJobs().projectId().get();
        this.change = application.change();
        this.availableSince = availableSince;
        this.concurrentlyWith = concurrentlyWith;
        JobStatus status = application.deploymentJobs().jobStatus().get(jobType);
        this.retry = status != null && status.jobError().filter(JobError.outOfCapacity::equals).isPresent();
        this.reason = retry ? "Retrying on out of capacity" : reason;
    }
    public ApplicationId id() { return id; }
    public JobType jobType() { return jobType; }
    public long projectId() { return projectId; }
    public Change change() { return change; }
    public String reason() { return reason; }
    public Instant availableSince() { return availableSince; }
    public boolean isRetry() { return retry; }
    public boolean applicationUpgrade() { return change.application().isPresent(); }
}
} |
Why not verify that length is the same. If actual is longer than expected, I assume that is incorrect. | public void testThatPropertiesAreReencoded() throws Exception {
FastSearcher fastSearcher = createFastSearcher();
assertEquals(100, fastSearcher.getCacheControl().capacity());
Query query = new Query("?query=ignored&dispatch.summaries=false");
query.getRanking().setQueryCache(true);
Result result = doSearch(fastSearcher, query, 0, 10);
Execution execution = new Execution(chainedAsSearchChain(fastSearcher), Execution.Context.createContextStub());
assertEquals(2, result.getHitCount());
execution.fill(result);
BasicPacket receivedPacket = mockBackend.getChannel().getLastReceived();
ByteBuffer buf = ByteBuffer.allocate(1000);
receivedPacket.encode(buf);
buf.flip();
byte[] actual = new byte[buf.remaining()];
buf.get(actual);
SessionId sessionId = query.getSessionId(false);
byte IGNORE = 69;
ByteBuffer answer = ByteBuffer.allocate(1024);
answer.put(new byte[] { 0, 0, 0, (byte)(145+sessionId.asUtf8String().getByteLength()), 0, 0, 0, -37, 0, 0, 48, 17, 0, 0, 0, 0,
IGNORE, IGNORE, IGNORE, IGNORE,
7, 'd', 'e', 'f', 'a', 'u', 'l', 't', 0, 0, 0, 0x03,
0, 0, 0, 3,
0, 0, 0, 4, 'r', 'a', 'n', 'k', 0, 0, 0, 1, 0, 0, 0, 9, 's', 'e', 's', 's', 'i', 'o', 'n', 'I', 'd'});
answer.putInt(sessionId.asUtf8String().getBytes().length);
answer.put(sessionId.asUtf8String().getBytes());
answer.put(new byte [] {
0, 0, 0, 5, 'm', 'a', 't', 'c', 'h', 0, 0, 0, 1, 0, 0, 0, 24, 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'd', 'b', '.', 's', 'e', 'a', 'r', 'c', 'h', 'd', 'o', 'c', 't', 'y', 'p', 'e', 0, 0, 0, 4, 't', 'e', 's', 't',
0, 0, 0, 6, 'c', 'a', 'c', 'h', 'e', 's', 0, 0, 0, 1, 0, 0, 0, 5, 'q', 'u', 'e', 'r', 'y', 0, 0, 0, 4, 't', 'r', 'u', 'e',
0, 0, 0, 2});
byte [] expected = new byte [answer.position()];
answer.flip();
answer.get(expected);
for (int i = 0; i < expected.length; ++i) {
if (expected[i] == IGNORE) {
actual[i] = IGNORE;
}
}
assertArrayEquals(expected, actual);
} | public void testThatPropertiesAreReencoded() throws Exception {
FastSearcher fastSearcher = createFastSearcher();
assertEquals(100, fastSearcher.getCacheControl().capacity());
Query query = new Query("?query=ignored&dispatch.summaries=false");
query.getRanking().setQueryCache(true);
Result result = doSearch(fastSearcher, query, 0, 10);
Execution execution = new Execution(chainedAsSearchChain(fastSearcher), Execution.Context.createContextStub());
assertEquals(2, result.getHitCount());
execution.fill(result);
BasicPacket receivedPacket = mockBackend.getChannel().getLastReceived();
ByteBuffer buf = ByteBuffer.allocate(1000);
receivedPacket.encode(buf);
buf.flip();
byte[] actual = new byte[buf.remaining()];
buf.get(actual);
SessionId sessionId = query.getSessionId(false);
byte IGNORE = 69;
ByteBuffer answer = ByteBuffer.allocate(1024);
answer.put(new byte[] { 0, 0, 0, (byte)(145+sessionId.asUtf8String().getByteLength()), 0, 0, 0, -37, 0, 0, 48, 17, 0, 0, 0, 0,
IGNORE, IGNORE, IGNORE, IGNORE,
7, 'd', 'e', 'f', 'a', 'u', 'l', 't', 0, 0, 0, 0x03,
0, 0, 0, 3,
0, 0, 0, 4, 'r', 'a', 'n', 'k', 0, 0, 0, 1, 0, 0, 0, 9, 's', 'e', 's', 's', 'i', 'o', 'n', 'I', 'd'});
answer.putInt(sessionId.asUtf8String().getBytes().length);
answer.put(sessionId.asUtf8String().getBytes());
answer.put(new byte [] {
0, 0, 0, 5, 'm', 'a', 't', 'c', 'h', 0, 0, 0, 1, 0, 0, 0, 24, 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'd', 'b', '.', 's', 'e', 'a', 'r', 'c', 'h', 'd', 'o', 'c', 't', 'y', 'p', 'e', 0, 0, 0, 4, 't', 'e', 's', 't',
0, 0, 0, 6, 'c', 'a', 'c', 'h', 'e', 's', 0, 0, 0, 1, 0, 0, 0, 5, 'q', 'u', 'e', 'r', 'y', 0, 0, 0, 4, 't', 'r', 'u', 'e',
0, 0, 0, 2});
byte [] expected = new byte [answer.position()];
answer.flip();
answer.get(expected);
for (int i = 0; i < expected.length; ++i) {
if (expected[i] == IGNORE) {
actual[i] = IGNORE;
}
}
assertArrayEquals(expected, actual);
} | class FastSearcherTestCase {
private final static DocumentdbInfoConfig documentdbInfoConfig = new DocumentdbInfoConfig(new DocumentdbInfoConfig.Builder());
private MockBackend mockBackend;
@Test
public void testNoNormalizing() {
    // Relevance scores from the backend should pass through without normalization
    Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
    FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
                                                 new FS4ResourcePool(1),
                                                 new MockDispatcher(Collections.emptyList()),
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(100, 1e64),
                                                 documentdbInfoConfig);
    MockFSChannel.setEmptyDocsums(false);
    assertEquals(100, fastSearcher.getCacheControl().capacity());
    Result result = doSearch(fastSearcher, new Query("?query=ignored"), 0, 10);
    // The raw (un-normalized) score is expected to remain large
    assertTrue(result.hits().get(0).getRelevance().getScore() > 1000);
}
@Test
public void testNullQuery() {
    // A request with no query string must yield a NULL_QUERY error result
    Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
    FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
                                                 new FS4ResourcePool(1),
                                                 new MockDispatcher(Collections.emptyList()),
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(100, 1e64),
                                                 documentdbInfoConfig);
    String query = "?junkparam=ignored";
    Result result = doSearch(fastSearcher,new Query(query), 0, 10);
    ErrorMessage message = result.hits().getError();
    assertNotNull("Got error", message);
    assertEquals("Null query", message.getMessage());
    assertEquals(query, message.getDetailedMessage());
    assertEquals(Error.NULL_QUERY.code, message.getCode());
}
@Test
public void testDispatchDotSummaries() {
    // Verifies which fill paths go through the dispatcher: plain summaries hit the mock
    // backend directly, while query-cache and dispatch.summaries requests go to the
    // (non-functional) rpc summary nodes and therefore produce a connection error.
    Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
    DocumentdbInfoConfig documentdbConfigWithOneDb =
            new DocumentdbInfoConfig(new DocumentdbInfoConfig.Builder().documentdb(new DocumentdbInfoConfig.Documentdb.Builder()
                    .name("testDb")
                    .summaryclass(new DocumentdbInfoConfig.Documentdb.Summaryclass.Builder().name("simple").id(7))
                    .rankprofile(new DocumentdbInfoConfig.Documentdb.Rankprofile.Builder()
                            .name("simpler").hasRankFeatures(false).hasSummaryFeatures(false))));
    List<SearchCluster.Node> nodes = new ArrayList<>();
    nodes.add(new SearchCluster.Node("host1", 5000, 0));
    nodes.add(new SearchCluster.Node("host2", 5000, 0));
    MockFS4ResourcePool mockFs4ResourcePool = new MockFS4ResourcePool();
    FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
                                                 mockFs4ResourcePool,
                                                 new MockDispatcher(nodes, mockFs4ResourcePool, 1, new VipStatus()),
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(100, 1e64),
                                                 documentdbConfigWithOneDb);
    {   // Plain summary request: served by the mock backend, no error
        String query = "?query=sddocname:a&summary=simple";
        Result result = doSearch(fastSearcher, new Query(query), 0, 10);
        doFill(fastSearcher, result);
        ErrorMessage error = result.hits().getError();
        assertNull("Since we don't route to the dispatcher we hit the mock backend, so no error", error);
    }
    {   // Query cache enabled: fill is routed through the dispatcher
        String query = "?query=sddocname:a&ranking.queryCache";
        Result result = doSearch(fastSearcher, new Query(query), 0, 10);
        doFill(fastSearcher, result);
        ErrorMessage error = result.hits().getError();
        assertEquals("Since we don't actually run summary backends we get this error when the Dispatcher is used",
                     "Error response from rpc node connection to host1:0: Connection error", error.getDetailedMessage());
    }
    {   // Explicit dispatch.summaries: fill is routed through the dispatcher
        String query = "?query=sddocname:a&dispatch.summaries&summary=simple&ranking=simpler";
        Result result = doSearch(fastSearcher, new Query(query), 0, 10);
        doFill(fastSearcher, result);
        ErrorMessage error = result.hits().getError();
        assertEquals("Since we don't actually run summary backends we get this error when the Dispatcher is used",
                     "Error response from rpc node connection to host1:0: Connection error", error.getDetailedMessage());
    }
}
@Test
public void testQueryWithRestrict() {
    // Verifies the exact on-the-wire encoding of a query restricted to one document type
    mockBackend = new MockBackend();
    DocumentdbInfoConfig documentdbConfigWithOneDb =
            new DocumentdbInfoConfig(new DocumentdbInfoConfig.Builder().documentdb(new DocumentdbInfoConfig.Documentdb.Builder().name("testDb")));
    FastSearcher fastSearcher = new FastSearcher(mockBackend,
                                                 new FS4ResourcePool(1),
                                                 new MockDispatcher(Collections.emptyList()),
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(100, 1e64),
                                                 documentdbConfigWithOneDb);
    Query query = new Query("?query=foo&model.restrict=testDb");
    query.prepare();
    Result result = doSearch(fastSearcher, query, 0, 10);
    Packet receivedPacket = mockBackend.getChannel().getLastQueryPacket();
    byte[] encoded = QueryTestCase.packetToBytes(receivedPacket);
    // Expected packet bytes; QueryTestCase.ignored marks positions that vary between runs
    byte[] correct = new byte[] {
        0, 0, 0, 100, 0, 0, 0, -38, 0, 0, 0, 0, 0, 16, 0, 6, 0, 10,
        QueryTestCase.ignored, QueryTestCase.ignored, QueryTestCase.ignored, QueryTestCase.ignored,
        0, 0, 0x40, 0x03, 7, 100, 101, 102, 97, 117, 108, 116, 0, 0, 0, 1, 0, 0, 0, 5, 109, 97, 116, 99, 104, 0, 0, 0, 1, 0, 0, 0, 24, 100, 111, 99, 117, 109, 101, 110, 116, 100, 98, 46, 115, 101, 97, 114, 99, 104, 100, 111, 99, 116, 121, 112, 101, 0, 0, 0, 6, 116, 101, 115, 116, 68, 98, 0, 0, 0, 1, 0, 0, 0, 7, 68, 1, 0, 3, 102, 111, 111
    };
    QueryTestCase.assertEqualArrays(correct, encoded);
}
@Test
public void testSearch() {
    // Exercises the packet cache through cold, warm, and cleared states
    FastSearcher fastSearcher = createFastSearcher();
    assertEquals(100, fastSearcher.getCacheControl().capacity());
    Result result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 10);
    Execution execution = new Execution(chainedAsSearchChain(fastSearcher), Execution.Context.createContextStub());
    assertEquals(2, result.getHitCount());
    execution.fill(result);
    assertCorrectHit1((FastHit) result.hits().get(0));
    assertCorrectTypes1((FastHit) result.hits().get(0));
    // First search: nothing cached yet
    for (int idx = 0; idx < result.getHitCount(); idx++) {
        assertTrue(!result.hits().get(idx).isCached());
    }
    // Same search again: everything should now come from the cache
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 10);
    assertEquals(2, result.getHitCount());
    execution.fill(result);
    assertCorrectHit1((FastHit) result.hits().get(0));
    for (int i = 0; i < result.getHitCount(); i++) {
        assertTrue(result.hits().get(i) + " should be cached",
                   result.hits().get(i).isCached());
    }
    result = doSearch(fastSearcher,new Query("?query=ignored"), 6, 3);
    execution.fill(result);
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 10);
    assertEquals(2, result.getHitCount());
    assertCorrectHit1((FastHit) result.hits().get(0));
    assertTrue("All hits are cached and the result knows it",
               result.isCached());
    for (int i = 0; i < result.getHitCount(); i++) {
        assertTrue(result.hits().get(i) + " should be cached",
                   result.hits().get(i).isCached());
    }
    // Clearing the cache makes the next search uncached again
    clearCache(fastSearcher);
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 10);
    assertEquals(2, result.getHitCount());
    execution.fill(result);
    assertCorrectHit1((FastHit) result.hits().get(0));
    assertTrue("All hits are not cached", !result.isCached());
    for (int i = 0; i < result.getHitCount(); i++) {
        assertTrue(!result.hits().get(i).isCached());
    }
    // A superset window: the previously fetched hit is cached, the new one is not
    clearCache(fastSearcher);
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 1);
    assertEquals(1, result.getConcreteHitCount());
    execution.fill(result);
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2);
    assertEquals(2, result.getConcreteHitCount());
    execution.fill(result);
    assertTrue(result.hits().get(0).isCached());
    assertFalse(result.hits().get(1).isCached());
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2);
    assertEquals(2, result.getConcreteHitCount());
    execution.fill(result);
    assertTrue(result.hits().get(0).isCached());
    assertTrue(result.hits().get(1).isCached());
    // Disjoint windows after clearing: nothing should be cached
    clearCache(fastSearcher);
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 1);
    assertEquals(1, result.getConcreteHitCount());
    result = doSearch(fastSearcher,new Query("?query=ignored"), 1, 1);
    assertEquals(1, result.getConcreteHitCount());
    for (int i = 0; i < result.getHitCount(); i++) {
        assertFalse("Hit " + i + " should not be cached.",
                    result.hits().get(i).isCached());
    }
}
/** Wraps a single searcher in a one-element search chain. */
private Chain<Searcher> chainedAsSearchChain(Searcher topOfChain) {
    // Uses the varargs Chain constructor (as in testPing) instead of building a list
    return new Chain<>(topOfChain);
}
/** Runs the given query with the given hit window through a one-searcher chain. */
private Result doSearch(Searcher searcher, Query query, int offset, int hits) {
    query.setOffset(offset);
    query.setHits(hits);
    return createExecution(searcher).search(query);
}
/** Creates an execution over a one-searcher chain with a stub-like context. */
private Execution createExecution(Searcher searcher) {
    RendererRegistry renderers = new RendererRegistry(Collections.emptyList());
    Execution.Context context = new Execution.Context(null, null, null, renderers, new SimpleLinguistics());
    return new Execution(chainedAsSearchChain(searcher), context);
}
/** Fills the given result through a one-searcher chain. */
private void doFill(Searcher searcher, Result result) {
    createExecution(searcher).fill(result);
}
/**
 * Builds a FastSearcher backed by a fresh MockBackend and the document db config read from
 * the test resource file. Note: the stray {@code @Test} annotation previously on this helper
 * was removed — JUnit 4 requires test methods to be public and void, so annotating this
 * private, value-returning helper would make the test class fail with an initialization error.
 */
private FastSearcher createFastSearcher() {
    mockBackend = new MockBackend();
    ConfigGetter<DocumentdbInfoConfig> getter = new ConfigGetter<>(DocumentdbInfoConfig.class);
    DocumentdbInfoConfig config = getter.getConfig("file:src/test/java/com/yahoo/prelude/fastsearch/test/documentdb-info.cfg");
    MockFSChannel.resetDocstamp();
    Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
    return new FastSearcher(mockBackend,
                            new FS4ResourcePool(1),
                            new MockDispatcher(Collections.emptyList()),
                            new SummaryParameters(null),
                            new ClusterParams("testhittype"),
                            new CacheParams(100, 1e64),
                            config);
}
// NOTE(review): annotated @Ignore but not @Test, so JUnit 4 will not run OR report this
// method at all — confirm whether it should be "@Ignore @Test".
@Ignore
public void testSinglePhaseCachedSupersets() {
    // Verifies that subsets of a cached result window are answered from the same cache entry
    Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
    MockFSChannel.resetDocstamp();
    FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
                                                 new FS4ResourcePool(1),
                                                 new MockDispatcher(Collections.emptyList()),
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(100, 1e64),
                                                 documentdbInfoConfig);
    CacheControl c = fastSearcher.getCacheControl();
    Result result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2);
    Query q = new Query("?query=ignored");
    ((WordItem) q.getModel().getQueryTree().getRoot()).setUniqueID(1);
    QueryPacket queryPacket = QueryPacket.create(q);
    CacheKey k = new CacheKey(queryPacket);
    PacketWrapper p = c.lookup(k, q);
    assertEquals(1, p.getResultPackets().size());
    result = doSearch(fastSearcher,new Query("?query=ignored"), 1, 1);
    p = c.lookup(k, q);
    // Still a single cached result packet serving the smaller window
    assertEquals(1, p.getResultPackets().size());
    assertEquals(1, result.getConcreteHitCount());
    for (int i = 0; i < result.getHitCount(); i++) {
        assertTrue(result.hits().get(i).isCached());
    }
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 1);
    p = c.lookup(k, q);
    assertEquals(1, p.getResultPackets().size());
    assertEquals(1, result.getConcreteHitCount());
    for (int i = 0; i < result.getHitCount(); i++) {
        assertTrue(result.hits().get(i).isCached());
    }
}
@Test
public void testMultiPhaseCachedSupersets() {
    // Sub-windows of an earlier, larger result should be served from cache, unfilled
    Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
    MockFSChannel.resetDocstamp();
    FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
                                                 new FS4ResourcePool(1),
                                                 new MockDispatcher(Collections.emptyList()),
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(100, 1e64),
                                                 documentdbInfoConfig);
    Result result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2);
    result = doSearch(fastSearcher,new Query("?query=ignored"), 1, 1);
    assertEquals(1, result.getConcreteHitCount());
    for (int i = 0; i < result.getHitCount(); i++) {
        assertTrue(result.hits().get(i).isCached());
        if (!result.hits().get(i).isMeta()) {
            // Cached but not yet filled with summary data
            assertTrue(result.hits().get(i).getFilled().isEmpty());
        }
    }
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 1);
    assertEquals(1, result.getConcreteHitCount());
    for (int i = 0; i < result.getHitCount(); i++) {
        assertTrue(result.hits().get(i).isCached());
        if (!result.hits().get(i).isMeta()) {
            assertTrue(result.hits().get(i).getFilled().isEmpty());
        }
    }
}
@Test
public void testSinglePassGroupingIsForcedWithSingleNodeGroups() {
    // With a single search node, grouping can be resolved in one pass
    FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
                                                 new FS4ResourcePool(1),
                                                 new MockDispatcher(new SearchCluster.Node("host0", 123, 0)),
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(100, 1e64),
                                                 documentdbInfoConfig);
    Query q = new Query("?query=foo");
    GroupingRequest request1 = GroupingRequest.newInstance(q);
    request1.setRootOperation(new AllOperation());
    GroupingRequest request2 = GroupingRequest.newInstance(q);
    AllOperation all = new AllOperation();
    all.addChild(new EachOperation());
    all.addChild(new EachOperation());
    request2.setRootOperation(all);
    assertForceSinglePassIs(false, q);
    fastSearcher.search(q, new Execution(Execution.Context.createContextStub()));
    // The searcher should have forced single-pass grouping on every operation
    assertForceSinglePassIs(true, q);
}
@Test
// NOTE(review): the name says "WithSingleNodeGroups" but the setup below uses two nodes —
// presumably this should be named ...WithMultiNodeGroups; verify against the test's intent.
public void testSinglePassGroupingIsNotForcedWithSingleNodeGroups() {
    MockDispatcher dispatcher =
            new MockDispatcher(ImmutableList.of(new SearchCluster.Node("host0", 123, 0),
                                                new SearchCluster.Node("host1", 123, 0)));
    FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
                                                 new FS4ResourcePool(1),
                                                 dispatcher,
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(100, 1e64),
                                                 documentdbInfoConfig);
    Query q = new Query("?query=foo");
    GroupingRequest request1 = GroupingRequest.newInstance(q);
    request1.setRootOperation(new AllOperation());
    GroupingRequest request2 = GroupingRequest.newInstance(q);
    AllOperation all = new AllOperation();
    all.addChild(new EachOperation());
    all.addChild(new EachOperation());
    request2.setRootOperation(all);
    assertForceSinglePassIs(false, q);
    fastSearcher.search(q, new Execution(Execution.Context.createContextStub()));
    // With multiple nodes, single-pass grouping must not be forced
    assertForceSinglePassIs(false, q);
}
/** Asserts the force-single-pass flag on every grouping operation of every request in the query. */
private void assertForceSinglePassIs(boolean expected, Query query) {
    for (GroupingRequest request : GroupingRequest.getRequests(query))
        assertForceSinglePassIs(expected, request.getRootOperation());
}
/** Recursively asserts the force-single-pass flag on an operation and all its children. */
private void assertForceSinglePassIs(boolean expected, GroupingOperation operation) {
    assertEquals("Force single pass is " + expected + " in " + operation,
                 expected, operation.getForceSinglePass());
    for (GroupingOperation child : operation.getChildren())
        assertForceSinglePassIs(expected, child);
}
@Test
public void testPing() throws IOException, InterruptedException {
    // Pings a real (mock) server socket and checks the pong's docstamp and toString
    Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
    BackendTestCase.MockServer server = new BackendTestCase.MockServer();
    FS4ResourcePool listeners = new FS4ResourcePool(new Fs4Config(new Fs4Config.Builder()));
    Backend backend = listeners.getBackend(server.host.getHostString(),server.host.getPort());
    FastSearcher fastSearcher = new FastSearcher(backend,
                                                 new FS4ResourcePool(1),
                                                 new MockDispatcher(Collections.emptyList()),
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(0, 0.0d),
                                                 documentdbInfoConfig);
    server.dispatch.packetData = BackendTestCase.PONG;
    server.dispatch.setNoChannel();
    Chain<Searcher> chain = new Chain<>(fastSearcher);
    Execution e = new Execution(chain, Execution.Context.createContextStub());
    Pong pong = e.ping(new Ping());
    assertTrue(pong.getPongPacket().isPresent());
    assertEquals(127, pong.getPongPacket().get().getDocstamp());
    // Tear down the backend and the mock server before the final assertions
    backend.shutdown();
    server.dispatch.socket.close();
    server.dispatch.connection.close();
    server.worker.join();
    pong.setPingInfo("blbl");
    assertEquals("Result of pinging using blbl", pong.toString());
}
/** Empties the searcher's packet cache. */
private void clearCache(FastSearcher fastSearcher) {
    fastSearcher.getCacheControl().clear();
}
/** Asserts the summary fields of the first expected hit were decoded to the right Java types. */
private void assertCorrectTypes1(FastHit hit) {
    assertEquals(String.class, hit.getField("TITLE").getClass());
    assertEquals(Integer.class, hit.getField("BYTES").getClass());
}
/** Asserts the field values, relevance, id and source of the first expected hit. */
private void assertCorrectHit1(FastHit hit) {
    assertEquals(
        "StudyOfMadonna.com - Interviews, Articles, Reviews, Quotes, Essays and more..",
        hit.getField("TITLE"));
    assertEquals("352", hit.getField("WORDS").toString());
    assertEquals(2003., hit.getRelevance().getScore(), 0.01d);
    assertEquals("index:0/234/0/" + FastHit.asHexString(hit.getGlobalId()), hit.getId().toString());
    assertEquals("9190", hit.getField("BYTES").toString());
    assertEquals("testhittype", hit.getSource());
}
@Test
public void null_summary_is_included_in_trace() {
    // A null summary class should be rendered as [null] in the fill trace
    String summary = null;
    assertThat(getTraceString(summary), containsString("summary=[null]"));
}
@Test
public void non_null_summary_is_included_in_trace() {
    // A concrete summary class should be rendered quoted in the fill trace
    String summary = "all";
    assertThat(getTraceString(summary), containsString("summary='all'"));
}
/**
 * Runs a traced search and fill with the given summary class, and returns the trace
 * message containing "fill to dispatch" (or null if no such message was produced).
 */
private String getTraceString(String summary) {
    FastSearcher fastSearcher = createFastSearcher();
    Query query = new Query("?query=ignored");
    query.getPresentation().setSummary(summary);
    query.setTraceLevel(2);
    Result result = doSearch(fastSearcher, query, 0, 10);
    doFill(fastSearcher, result);
    Trace trace = query.getContext(false).getTrace();
    final AtomicReference<String> fillTraceString = new AtomicReference<>();
    trace.traceNode().accept(new TraceVisitor() {
        @Override
        public void visit(TraceNode traceNode) {
            if (traceNode.payload() instanceof String && traceNode.payload().toString().contains("fill to dispatch"))
                fillTraceString.set((String) traceNode.payload());
        }
    });
    return fillTraceString.get();
}
} | class FastSearcherTestCase {
private final static DocumentdbInfoConfig documentdbInfoConfig = new DocumentdbInfoConfig(new DocumentdbInfoConfig.Builder());
private MockBackend mockBackend;
@Test
public void testNoNormalizing() {
    // Relevance scores from the backend should pass through without normalization
    Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
    FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
                                                 new FS4ResourcePool(1),
                                                 new MockDispatcher(Collections.emptyList()),
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(100, 1e64),
                                                 documentdbInfoConfig);
    MockFSChannel.setEmptyDocsums(false);
    assertEquals(100, fastSearcher.getCacheControl().capacity());
    Result result = doSearch(fastSearcher, new Query("?query=ignored"), 0, 10);
    // The raw (un-normalized) score is expected to remain large
    assertTrue(result.hits().get(0).getRelevance().getScore() > 1000);
}
@Test
public void testNullQuery() {
    // A request with no query string must yield a NULL_QUERY error result
    Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
    FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
                                                 new FS4ResourcePool(1),
                                                 new MockDispatcher(Collections.emptyList()),
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(100, 1e64),
                                                 documentdbInfoConfig);
    String query = "?junkparam=ignored";
    Result result = doSearch(fastSearcher,new Query(query), 0, 10);
    ErrorMessage message = result.hits().getError();
    assertNotNull("Got error", message);
    assertEquals("Null query", message.getMessage());
    assertEquals(query, message.getDetailedMessage());
    assertEquals(Error.NULL_QUERY.code, message.getCode());
}
@Test
public void testDispatchDotSummaries() {
    // Verifies which fill paths go through the dispatcher: plain summaries hit the mock
    // backend directly, while query-cache and dispatch.summaries requests go to the
    // (non-functional) rpc summary nodes and therefore produce a connection error.
    Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
    DocumentdbInfoConfig documentdbConfigWithOneDb =
            new DocumentdbInfoConfig(new DocumentdbInfoConfig.Builder().documentdb(new DocumentdbInfoConfig.Documentdb.Builder()
                    .name("testDb")
                    .summaryclass(new DocumentdbInfoConfig.Documentdb.Summaryclass.Builder().name("simple").id(7))
                    .rankprofile(new DocumentdbInfoConfig.Documentdb.Rankprofile.Builder()
                            .name("simpler").hasRankFeatures(false).hasSummaryFeatures(false))));
    List<SearchCluster.Node> nodes = new ArrayList<>();
    nodes.add(new SearchCluster.Node("host1", 5000, 0));
    nodes.add(new SearchCluster.Node("host2", 5000, 0));
    MockFS4ResourcePool mockFs4ResourcePool = new MockFS4ResourcePool();
    FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
                                                 mockFs4ResourcePool,
                                                 new MockDispatcher(nodes, mockFs4ResourcePool, 1, new VipStatus()),
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(100, 1e64),
                                                 documentdbConfigWithOneDb);
    {   // Plain summary request: served by the mock backend, no error
        String query = "?query=sddocname:a&summary=simple";
        Result result = doSearch(fastSearcher, new Query(query), 0, 10);
        doFill(fastSearcher, result);
        ErrorMessage error = result.hits().getError();
        assertNull("Since we don't route to the dispatcher we hit the mock backend, so no error", error);
    }
    {   // Query cache enabled: fill is routed through the dispatcher
        String query = "?query=sddocname:a&ranking.queryCache";
        Result result = doSearch(fastSearcher, new Query(query), 0, 10);
        doFill(fastSearcher, result);
        ErrorMessage error = result.hits().getError();
        assertEquals("Since we don't actually run summary backends we get this error when the Dispatcher is used",
                     "Error response from rpc node connection to host1:0: Connection error", error.getDetailedMessage());
    }
    {   // Explicit dispatch.summaries: fill is routed through the dispatcher
        String query = "?query=sddocname:a&dispatch.summaries&summary=simple&ranking=simpler";
        Result result = doSearch(fastSearcher, new Query(query), 0, 10);
        doFill(fastSearcher, result);
        ErrorMessage error = result.hits().getError();
        assertEquals("Since we don't actually run summary backends we get this error when the Dispatcher is used",
                     "Error response from rpc node connection to host1:0: Connection error", error.getDetailedMessage());
    }
}
@Test
public void testQueryWithRestrict() {
    // Verifies the exact on-the-wire encoding of a query restricted to one document type
    mockBackend = new MockBackend();
    DocumentdbInfoConfig documentdbConfigWithOneDb =
            new DocumentdbInfoConfig(new DocumentdbInfoConfig.Builder().documentdb(new DocumentdbInfoConfig.Documentdb.Builder().name("testDb")));
    FastSearcher fastSearcher = new FastSearcher(mockBackend,
                                                 new FS4ResourcePool(1),
                                                 new MockDispatcher(Collections.emptyList()),
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(100, 1e64),
                                                 documentdbConfigWithOneDb);
    Query query = new Query("?query=foo&model.restrict=testDb");
    query.prepare();
    Result result = doSearch(fastSearcher, query, 0, 10);
    Packet receivedPacket = mockBackend.getChannel().getLastQueryPacket();
    byte[] encoded = QueryTestCase.packetToBytes(receivedPacket);
    // Expected packet bytes; QueryTestCase.ignored marks positions that vary between runs
    byte[] correct = new byte[] {
        0, 0, 0, 100, 0, 0, 0, -38, 0, 0, 0, 0, 0, 16, 0, 6, 0, 10,
        QueryTestCase.ignored, QueryTestCase.ignored, QueryTestCase.ignored, QueryTestCase.ignored,
        0, 0, 0x40, 0x03, 7, 100, 101, 102, 97, 117, 108, 116, 0, 0, 0, 1, 0, 0, 0, 5, 109, 97, 116, 99, 104, 0, 0, 0, 1, 0, 0, 0, 24, 100, 111, 99, 117, 109, 101, 110, 116, 100, 98, 46, 115, 101, 97, 114, 99, 104, 100, 111, 99, 116, 121, 112, 101, 0, 0, 0, 6, 116, 101, 115, 116, 68, 98, 0, 0, 0, 1, 0, 0, 0, 7, 68, 1, 0, 3, 102, 111, 111
    };
    QueryTestCase.assertEqualArrays(correct, encoded);
}
/**
 * Exercises the searcher's packet cache through a sequence of identical queries:
 * the first fetch populates the cache, subsequent fetches hit it, clearing empties it,
 * and partial windows ([0,1] then [0,2]) mix cached and uncached hits.
 */
@Test
public void testSearch() {
    FastSearcher fastSearcher = createFastSearcher();
    assertEquals(100, fastSearcher.getCacheControl().capacity());

    // 1: first search and fill — nothing is cached yet
    Result result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 10);
    Execution execution = new Execution(chainedAsSearchChain(fastSearcher), Execution.Context.createContextStub());
    assertEquals(2, result.getHitCount());
    execution.fill(result);
    assertCorrectHit1((FastHit) result.hits().get(0));
    assertCorrectTypes1((FastHit) result.hits().get(0));
    for (int idx = 0; idx < result.getHitCount(); idx++) {
        assertTrue(!result.hits().get(idx).isCached());
    }

    // 2: the same search again is now served from the cache
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 10);
    assertEquals(2, result.getHitCount());
    execution.fill(result);
    assertCorrectHit1((FastHit) result.hits().get(0));
    for (int i = 0; i < result.getHitCount(); i++) {
        assertTrue(result.hits().get(i) + " should be cached",
                   result.hits().get(i).isCached());
    }

    // 3: a window beyond the result set, then the full window again — everything cached
    result = doSearch(fastSearcher,new Query("?query=ignored"), 6, 3);
    execution.fill(result);
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 10);
    assertEquals(2, result.getHitCount());
    assertCorrectHit1((FastHit) result.hits().get(0));
    assertTrue("All hits are cached and the result knows it",
               result.isCached());
    for (int i = 0; i < result.getHitCount(); i++) {
        assertTrue(result.hits().get(i) + " should be cached",
                   result.hits().get(i).isCached());
    }

    // 4: clearing the cache makes the next search uncached again
    clearCache(fastSearcher);
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 10);
    assertEquals(2, result.getHitCount());
    execution.fill(result);
    assertCorrectHit1((FastHit) result.hits().get(0));
    assertTrue("All hits are not cached", !result.isCached());
    for (int i = 0; i < result.getHitCount(); i++) {
        assertTrue(!result.hits().get(i).isCached());
    }

    // 5: fetch hit 0 only, then hits 0-1 — the first is cached, the second is not
    clearCache(fastSearcher);
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 1);
    assertEquals(1, result.getConcreteHitCount());
    execution.fill(result);
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2);
    assertEquals(2, result.getConcreteHitCount());
    execution.fill(result);
    assertTrue(result.hits().get(0).isCached());
    assertFalse(result.hits().get(1).isCached());

    // 6: repeating the two-hit window — now both hits are cached
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2);
    assertEquals(2, result.getConcreteHitCount());
    execution.fill(result);
    assertTrue(result.hits().get(0).isCached());
    assertTrue(result.hits().get(1).isCached());

    // 7: unfilled windows after clearing — nothing gets cached
    clearCache(fastSearcher);
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 1);
    assertEquals(1, result.getConcreteHitCount());
    result = doSearch(fastSearcher,new Query("?query=ignored"), 1, 1);
    assertEquals(1, result.getConcreteHitCount());
    for (int i = 0; i < result.getHitCount(); i++) {
        assertFalse("Hit " + i + " should not be cached.",
                    result.hits().get(i).isCached());
    }
}
/** Wraps a single searcher in a one-element search chain. */
private Chain<Searcher> chainedAsSearchChain(Searcher topOfChain) {
    return new Chain<>(Collections.singletonList(topOfChain));
}
/** Runs the given query with the given window through the given searcher and returns the result. */
private Result doSearch(Searcher searcher, Query query, int offset, int hits) {
    query.setOffset(offset);
    query.setHits(hits);
    Execution execution = createExecution(searcher);
    return execution.search(query);
}
/** Creates an execution over a one-searcher chain with a minimal context (no configs, empty renderer registry). */
private Execution createExecution(Searcher searcher) {
    RendererRegistry renderers = new RendererRegistry(Collections.emptyList());
    Execution.Context context = new Execution.Context(null, null, null, renderers, new SimpleLinguistics());
    return new Execution(chainedAsSearchChain(searcher), context);
}
/** Fills the given result through the given searcher. */
private void doFill(Searcher searcher, Result result) {
    Execution execution = createExecution(searcher);
    execution.fill(result);
}
/**
 * Creates a FastSearcher wired to a fresh MockBackend and a dispatcher with no nodes.
 * <p>
 * This is a factory helper, not a test case: it must NOT carry a {@code @Test}
 * annotation. JUnit 4 requires test methods to be public, void and parameterless,
 * so annotating this private, value-returning method causes the runner to report
 * an initializationError instead of running the tests.
 */
private FastSearcher createFastSearcher() {
    mockBackend = new MockBackend();
    ConfigGetter<DocumentdbInfoConfig> getter = new ConfigGetter<>(DocumentdbInfoConfig.class);
    DocumentdbInfoConfig config = getter.getConfig("file:src/test/java/com/yahoo/prelude/fastsearch/test/documentdb-info.cfg");
    MockFSChannel.resetDocstamp();
    Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
    return new FastSearcher(mockBackend,
                            new FS4ResourcePool(1),
                            new MockDispatcher(Collections.emptyList()),
                            new SummaryParameters(null),
                            new ClusterParams("testhittype"),
                            new CacheParams(100, 1e64),
                            config);
}
/**
 * Disabled test: after a two-hit search, narrower windows should be answered from the
 * single cached superset packet (one result packet in the cache, hits marked cached).
 * <p>
 * Note: {@code @Ignore} alone is not discovered by the JUnit 4 runner at all; pairing it
 * with {@code @Test} makes the disabled test visible (reported as skipped) instead of
 * silently vanishing from the report.
 */
@Ignore
@Test
public void testSinglePhaseCachedSupersets() {
    Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
    // Reset shared mock backend state — presumably so cached packets stay valid across tests
    MockFSChannel.resetDocstamp();
    FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
                                                 new FS4ResourcePool(1),
                                                 new MockDispatcher(Collections.emptyList()),
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(100, 1e64),
                                                 documentdbInfoConfig);
    CacheControl c = fastSearcher.getCacheControl();
    Result result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2);
    // Build the cache key corresponding to the query just issued
    Query q = new Query("?query=ignored");
    ((WordItem) q.getModel().getQueryTree().getRoot()).setUniqueID(1);
    QueryPacket queryPacket = QueryPacket.create(q);
    CacheKey k = new CacheKey(queryPacket);
    PacketWrapper p = c.lookup(k, q);
    assertEquals(1, p.getResultPackets().size());
    // A narrower window is served from the same single cached packet
    result = doSearch(fastSearcher,new Query("?query=ignored"), 1, 1);
    p = c.lookup(k, q);
    assertEquals(1, p.getResultPackets().size());
    assertEquals(1, result.getConcreteHitCount());
    for (int i = 0; i < result.getHitCount(); i++) {
        assertTrue(result.hits().get(i).isCached());
    }
    // And so is the first-hit window
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 1);
    p = c.lookup(k, q);
    assertEquals(1, p.getResultPackets().size());
    assertEquals(1, result.getConcreteHitCount());
    for (int i = 0; i < result.getHitCount(); i++) {
        assertTrue(result.hits().get(i).isCached());
    }
}
/**
 * After a two-hit search, narrower windows ([1,1] and [0,1]) are served from the cached
 * superset: hits are marked cached, and non-meta hits remain unfilled (no summary fields).
 */
@Test
public void testMultiPhaseCachedSupersets() {
    Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
    // Reset shared mock backend state — presumably so cached packets stay valid across tests
    MockFSChannel.resetDocstamp();
    FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
                                                 new FS4ResourcePool(1),
                                                 new MockDispatcher(Collections.emptyList()),
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(100, 1e64),
                                                 documentdbInfoConfig);
    Result result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2);
    // Window [1,1] is a subset of the cached [0,2] result
    result = doSearch(fastSearcher,new Query("?query=ignored"), 1, 1);
    assertEquals(1, result.getConcreteHitCount());
    for (int i = 0; i < result.getHitCount(); i++) {
        assertTrue(result.hits().get(i).isCached());
        if (!result.hits().get(i).isMeta()) {
            assertTrue(result.hits().get(i).getFilled().isEmpty());
        }
    }
    // Window [0,1] likewise
    result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 1);
    assertEquals(1, result.getConcreteHitCount());
    for (int i = 0; i < result.getHitCount(); i++) {
        assertTrue(result.hits().get(i).isCached());
        if (!result.hits().get(i).isMeta()) {
            assertTrue(result.hits().get(i).getFilled().isEmpty());
        }
    }
}
/**
 * With a dispatcher over a single-node group, searching rewrites the grouping requests
 * attached to the query so that single-pass execution is forced (false before the
 * search, true after).
 */
@Test
public void testSinglePassGroupingIsForcedWithSingleNodeGroups() {
    FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
                                                 new FS4ResourcePool(1),
                                                 new MockDispatcher(new SearchCluster.Node("host0", 123, 0)),
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(100, 1e64),
                                                 documentdbInfoConfig);
    Query q = new Query("?query=foo");
    // Two grouping requests: a bare 'all' and an 'all' with two 'each' children
    GroupingRequest request1 = GroupingRequest.newInstance(q);
    request1.setRootOperation(new AllOperation());
    GroupingRequest request2 = GroupingRequest.newInstance(q);
    AllOperation all = new AllOperation();
    all.addChild(new EachOperation());
    all.addChild(new EachOperation());
    request2.setRootOperation(all);
    assertForceSinglePassIs(false, q);
    fastSearcher.search(q, new Execution(Execution.Context.createContextStub()));
    assertForceSinglePassIs(true, q);
}
/**
 * With a dispatcher over a two-node group, searching leaves the grouping requests
 * as-is: single-pass execution is NOT forced (false both before and after the search).
 * <p>
 * Renamed from testSinglePassGroupingIsNotForcedWithSingleNodeGroups: the old name
 * contradicted the setup (the group has two nodes) and clashed with the single-node
 * test above. No code references test methods by name, so the rename is safe.
 */
@Test
public void testSinglePassGroupingIsNotForcedWithMultiNodeGroups() {
    MockDispatcher dispatcher =
            new MockDispatcher(ImmutableList.of(new SearchCluster.Node("host0", 123, 0),
                                                new SearchCluster.Node("host1", 123, 0)));
    FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
                                                 new FS4ResourcePool(1),
                                                 dispatcher,
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(100, 1e64),
                                                 documentdbInfoConfig);
    Query q = new Query("?query=foo");
    // Two grouping requests: a bare 'all' and an 'all' with two 'each' children
    GroupingRequest request1 = GroupingRequest.newInstance(q);
    request1.setRootOperation(new AllOperation());
    GroupingRequest request2 = GroupingRequest.newInstance(q);
    AllOperation all = new AllOperation();
    all.addChild(new EachOperation());
    all.addChild(new EachOperation());
    request2.setRootOperation(all);
    assertForceSinglePassIs(false, q);
    fastSearcher.search(q, new Execution(Execution.Context.createContextStub()));
    assertForceSinglePassIs(false, q);
}
/** Asserts, recursively, that every grouping request attached to the query carries the expected force-single-pass flag. */
private void assertForceSinglePassIs(boolean expected, Query query) {
    GroupingRequest.getRequests(query)
                   .forEach(request -> assertForceSinglePassIs(expected, request.getRootOperation()));
}
/** Asserts that this operation and all its descendants carry the expected force-single-pass flag. */
private void assertForceSinglePassIs(boolean expected, GroupingOperation operation) {
    String message = "Force single pass is " + expected + " in " + operation;
    assertEquals(message, expected, operation.getForceSinglePass());
    operation.getChildren().forEach(child -> assertForceSinglePassIs(expected, child));
}
/**
 * Pings a mock FS4 server over a real socket and verifies the returned pong packet,
 * then checks Pong's string rendering after setPingInfo.
 */
@Test
public void testPing() throws IOException, InterruptedException {
    Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
    BackendTestCase.MockServer server = new BackendTestCase.MockServer();
    FS4ResourcePool listeners = new FS4ResourcePool(new Fs4Config(new Fs4Config.Builder()));
    Backend backend = listeners.getBackend(server.host.getHostString(),server.host.getPort());
    FastSearcher fastSearcher = new FastSearcher(backend,
                                                 new FS4ResourcePool(1),
                                                 new MockDispatcher(Collections.emptyList()),
                                                 new SummaryParameters(null),
                                                 new ClusterParams("testhittype"),
                                                 new CacheParams(0, 0.0d),  // no caching in this test
                                                 documentdbInfoConfig);
    // The server answers with a canned pong packet
    server.dispatch.packetData = BackendTestCase.PONG;
    server.dispatch.setNoChannel();
    Chain<Searcher> chain = new Chain<>(fastSearcher);
    Execution e = new Execution(chain, Execution.Context.createContextStub());
    Pong pong = e.ping(new Ping());
    assertTrue(pong.getPongPacket().isPresent());
    assertEquals(127, pong.getPongPacket().get().getDocstamp());
    // Tear down the backend and server resources before joining the server thread
    backend.shutdown();
    server.dispatch.socket.close();
    server.dispatch.connection.close();
    server.worker.join();
    pong.setPingInfo("blbl");
    assertEquals("Result of pinging using blbl", pong.toString());
}
/** Empties the searcher's packet cache so subsequent queries go back to the backend. */
private void clearCache(FastSearcher fastSearcher) {
    CacheControl cacheControl = fastSearcher.getCacheControl();
    cacheControl.clear();
}
/** Asserts that the standard mock hit's fields are materialized with the expected Java types. */
private void assertCorrectTypes1(FastHit hit) {
    Object title = hit.getField("TITLE");
    Object bytes = hit.getField("BYTES");
    assertEquals(String.class, title.getClass());
    assertEquals(Integer.class, bytes.getClass());
}
/** Asserts the field values, relevance, id and source of the standard first mock hit. */
private void assertCorrectHit1(FastHit hit) {
    String expectedTitle = "StudyOfMadonna.com - Interviews, Articles, Reviews, Quotes, Essays and more..";
    assertEquals(expectedTitle, hit.getField("TITLE"));
    assertEquals("352", hit.getField("WORDS").toString());
    assertEquals(2003., hit.getRelevance().getScore(), 0.01d);
    String expectedId = "index:0/234/0/" + FastHit.asHexString(hit.getGlobalId());
    assertEquals(expectedId, hit.getId().toString());
    assertEquals("9190", hit.getField("BYTES").toString());
    assertEquals("testhittype", hit.getSource());
}
/** A null summary class shows up as 'summary=[null]' in the fill trace. */
@Test
public void null_summary_is_included_in_trace() {
    assertThat(getTraceString(null), containsString("summary=[null]"));
}
/** A concrete summary class shows up quoted, as summary='all', in the fill trace. */
@Test
public void non_null_summary_is_included_in_trace() {
    assertThat(getTraceString("all"), containsString("summary='all'"));
}
/**
 * Runs a traced (level 2) search and fill with the given summary class and returns the
 * trace payload containing "fill to dispatch" (the last such payload visited),
 * or null if none was traced.
 */
private String getTraceString(String summary) {
    FastSearcher fastSearcher = createFastSearcher();
    Query query = new Query("?query=ignored");
    query.getPresentation().setSummary(summary);
    query.setTraceLevel(2);
    Result result = doSearch(fastSearcher, query, 0, 10);
    doFill(fastSearcher, result);
    Trace trace = query.getContext(false).getTrace();
    // Accumulate the matching payload while walking the trace tree; a later match overwrites an earlier one
    final AtomicReference<String> fillTraceString = new AtomicReference<>();
    trace.traceNode().accept(new TraceVisitor() {
        @Override
        public void visit(TraceNode traceNode) {
            if (traceNode.payload() instanceof String && traceNode.payload().toString().contains("fill to dispatch"))
                fillTraceString.set((String) traceNode.payload());
        }
    });
    return fillTraceString.get();
}
} | |
Pretty sure they do, anyway now I don't know what your suggestion was :-) | public DeploymentInstanceSpec instance(InstanceName name) {
for (Step step : steps) {
if ( ! (step instanceof DeploymentInstanceSpec)) continue;
DeploymentInstanceSpec instanceStep = (DeploymentInstanceSpec)step;
if (instanceStep.name().equals(name))
return instanceStep;
}
return null;
} | DeploymentInstanceSpec instanceStep = (DeploymentInstanceSpec)step; | public DeploymentInstanceSpec instance(InstanceName name) {
for (DeploymentInstanceSpec instance : instances()) {
if (instance.name().equals(name))
return instance;
}
return null;
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
Optional.empty(),
Collections.emptyList(),
Collections.emptyList(),
"<deployment version='1.0'/>",
Optional.empty(),
Optional.empty(),
Notifications.none(),
List.of());
private final List<Step> steps;
private final Optional<Integer> majorVersion;
private final String xmlForm;
public DeploymentSpec(List<Step> steps,
Optional<Integer> majorVersion,
String xmlForm) {
if (singleInstance(steps)) {
var singleInstance = (DeploymentInstanceSpec)steps.get(0);
this.steps = List.of(singleInstance.withSteps(completeSteps(singleInstance.steps())));
}
else {
this.steps = List.copyOf(completeSteps(steps));
}
this.majorVersion = majorVersion;
this.xmlForm = xmlForm;
validateTotalDelay(steps);
}
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, Optional<Integer> majorVersion,
List<ChangeBlocker> changeBlockers, List<Step> steps, String xmlForm,
Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService,
Notifications notifications,
List<Endpoint> endpoints) {
this(List.of(new DeploymentInstanceSpec(InstanceName.from("default"),
steps,
upgradePolicy,
changeBlockers,
globalServiceId,
athenzDomain,
athenzService,
notifications,
endpoints)),
majorVersion,
xmlForm);
}
/** Adds missing required steps and reorders steps to a permissible order */
private static List<DeploymentSpec.Step> completeSteps(List<DeploymentSpec.Step> inputSteps) {
List<Step> steps = new ArrayList<>(inputSteps);
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new DeploymentSpec.DeclaredZone(Environment.staging));
}
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new DeploymentSpec.DeclaredZone(Environment.test));
}
DeploymentSpec.DeclaredZone testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
DeploymentSpec.DeclaredZone stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
* Removes the first occurrence of a deployment step to the given environment and returns it.
*
* @return the removed step, or null if it is not present
*/
private static DeploymentSpec.DeclaredZone remove(Environment environment, List<DeploymentSpec.Step> steps) {
for (int i = 0; i < steps.size(); i++) {
if ( ! (steps.get(i) instanceof DeploymentSpec.DeclaredZone)) continue;
DeploymentSpec.DeclaredZone zoneStep = (DeploymentSpec.DeclaredZone)steps.get(i);
if (zoneStep.environment() == environment) {
steps.remove(i);
return zoneStep;
}
}
return null;
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private void validateTotalDelay(List<Step> steps) {
long totalDelaySeconds = steps.stream().mapToLong(step -> (step.delay().getSeconds())).sum();
if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
" but max 24 hours is allowed");
}
private DeploymentInstanceSpec defaultInstance() {
if (singleInstance(steps)) return (DeploymentInstanceSpec)steps.get(0);
throw new IllegalArgumentException("This deployment spec does not support the legacy API " +
"as it has multiple instances: " +
instances().stream().map(Step::toString).collect(Collectors.joining(",")));
}
public Optional<String> globalServiceId() { return defaultInstance().globalServiceId(); }
public UpgradePolicy upgradePolicy() { return defaultInstance().upgradePolicy(); }
/** Returns the major version this application is pinned to, or empty (default) to allow all major versions */
public Optional<Integer> majorVersion() { return majorVersion; }
public boolean canUpgradeAt(Instant instant) { return defaultInstance().canUpgradeAt(instant); }
public boolean canChangeRevisionAt(Instant instant) { return defaultInstance().canChangeRevisionAt(instant); }
public List<ChangeBlocker> changeBlocker() { return defaultInstance().changeBlocker(); }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() {
if (singleInstance(steps)) return defaultInstance().steps();
return steps;
}
public List<DeclaredZone> zones() {
return defaultInstance().steps().stream()
.flatMap(step -> step.zones().stream())
.collect(Collectors.toList());
}
public Optional<AthenzDomain> athenzDomain() { return defaultInstance().athenzDomain(); }
public Optional<AthenzService> athenzService(Environment environment, RegionName region) {
return defaultInstance().athenzService(environment, region);
}
public Notifications notifications() { return defaultInstance().notifications(); }
public List<Endpoint> endpoints() { return defaultInstance().endpoints(); }
/** Returns the XML form of this spec, or null if it was not created by fromXml, nor is empty */
public String xmlForm() { return xmlForm; }
public boolean includes(Environment environment, Optional<RegionName> region) {
return defaultInstance().deploysTo(environment, region);
}
private static boolean singleInstance(List<DeploymentSpec.Step> steps) {
return steps.size() == 1 && steps.get(0) instanceof DeploymentInstanceSpec;
}
/** Returns the instance step containing the given instance name, or null if not present */
public DeploymentInstanceSpec instance(String name) {
return instance(InstanceName.from(name));
}
/** Returns the instance step containing the given instance name, or null if not present */
/** Returns the instance step containing the given instance name, or throws an IllegalArgumentException if not present */
public DeploymentInstanceSpec requireInstance(String name) {
return requireInstance(InstanceName.from(name));
}
public DeploymentInstanceSpec requireInstance(InstanceName name) {
DeploymentInstanceSpec instance = instance(name);
if (instance == null)
throw new IllegalArgumentException("No instance '" + name + "' in deployment.xml'. Instances: " +
instances().stream().map(spec -> spec.name().toString()).collect(Collectors.joining(",")));
return instance;
}
/** Returns the steps of this which are instances */
public List<DeploymentInstanceSpec> instances() {
return steps.stream()
.filter(step -> step instanceof DeploymentInstanceSpec).map(DeploymentInstanceSpec.class::cast)
.collect(Collectors.toList());
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
return new DeploymentSpecXmlReader().read(reader);
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(String xmlForm) {
return fromXml(xmlForm, true);
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(String xmlForm, boolean validate) {
return new DeploymentSpecXmlReader(validate).read(xmlForm);
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
DeploymentSpec other = (DeploymentSpec) o;
return majorVersion.equals(other.majorVersion) &&
steps.equals(other.steps) &&
xmlForm.equals(other.xmlForm);
}
@Override
public int hashCode() {
return Objects.hash(majorVersion, steps, xmlForm);
}
/** This may be invoked by a continuous build */
public static void main(String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
/** A deployment step */
public abstract static class Step {
/** Returns whether this step deploys to the given region */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}
/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);
/** Returns the zones deployed to in this step */
public List<DeclaredZone> zones() { return Collections.emptyList(); }
/** The delay introduced by this step (beyond the time it takes to execute the step). Default is zero. */
public Duration delay() { return Duration.ZERO; }
/** Returns all the steps nested in this. This default implementatiino returns an empty list. */
public List<Step> steps() { return List.of(); }
}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {
private final Duration duration;
public Delay(Duration duration) {
this.duration = duration;
}
public Duration duration() { return duration; }
@Override
public Duration delay() { return duration; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }
@Override
public String toString() {
return "delay " + duration;
}
}
/** A deployment step which is to run deployment in a particular zone */
public static class DeclaredZone extends Step {
private final Environment environment;
private final Optional<RegionName> region;
private final boolean active;
private final Optional<AthenzService> athenzService;
private final Optional<String> testerFlavor;
public DeclaredZone(Environment environment) {
this(environment, Optional.empty(), false);
}
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
this(environment, region, active, Optional.empty(), Optional.empty());
}
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active, Optional<AthenzService> athenzService) {
this(environment, region, active, athenzService, Optional.empty());
}
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active,
Optional<AthenzService> athenzService, Optional<String> testerFlavor) {
if (environment != Environment.prod && region.isPresent())
throw new IllegalArgumentException("Non-prod environments cannot specify a region");
if (environment == Environment.prod && region.isEmpty())
throw new IllegalArgumentException("Prod environments must be specified with a region");
this.environment = environment;
this.region = region;
this.active = active;
this.athenzService = athenzService;
this.testerFlavor = testerFlavor;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
public Optional<String> testerFlavor() { return testerFlavor; }
public Optional<AthenzService> athenzService() { return athenzService; }
@Override
public List<DeclaredZone> zones() { return Collections.singletonList(this); }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
if (environment != this.environment) return false;
if (region.isPresent() && ! region.equals(this.region)) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(environment, region);
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof DeclaredZone)) return false;
DeclaredZone other = (DeclaredZone)o;
if (this.environment != other.environment) return false;
if ( ! this.region.equals(other.region())) return false;
return true;
}
@Override
public String toString() {
return environment + (region.map(regionName -> "." + regionName).orElse(""));
}
}
/** A deployment step which is to run multiple steps (zones or instances) in parallel */
public static class ParallelZones extends Step {
private final List<Step> steps;
public ParallelZones(List<Step> steps) {
this.steps = List.copyOf(steps);
}
/** Returns the steps inside this which are zones */
@Override
public List<DeclaredZone> zones() {
return this.steps.stream()
.filter(step -> step instanceof DeclaredZone)
.map(DeclaredZone.class::cast)
.collect(Collectors.toList());
}
/** Returns all the steps nested in this */
@Override
public List<Step> steps() { return steps; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
return steps().stream().anyMatch(zone -> zone.deploysTo(environment, region));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof ParallelZones)) return false;
ParallelZones that = (ParallelZones) o;
return Objects.equals(steps, that.steps);
}
@Override
public int hashCode() {
return Objects.hash(steps);
}
@Override
public String toString() {
return steps.size() + " parallel steps";
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
/** A blocking of changes in a given time window */
public static class ChangeBlocker {
private final boolean revision;
private final boolean version;
private final TimeWindow window;
public ChangeBlocker(boolean revision, boolean version, TimeWindow window) {
this.revision = revision;
this.version = version;
this.window = window;
}
public boolean blocksRevisions() { return revision; }
public boolean blocksVersions() { return version; }
public TimeWindow window() { return window; }
@Override
public String toString() {
return "change blocker revision=" + revision + " version=" + version + " window=" + window;
}
}
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
Optional.empty(),
Collections.emptyList(),
Collections.emptyList(),
"<deployment version='1.0'/>",
Optional.empty(),
Optional.empty(),
Notifications.none(),
List.of());
private final List<Step> steps;
private final Optional<Integer> majorVersion;
private final String xmlForm;
public DeploymentSpec(List<Step> steps,
Optional<Integer> majorVersion,
String xmlForm) {
if (singleInstance(steps)) {
var singleInstance = (DeploymentInstanceSpec)steps.get(0);
this.steps = List.of(singleInstance.withSteps(completeSteps(singleInstance.steps())));
}
else {
this.steps = List.copyOf(completeSteps(steps));
}
this.majorVersion = majorVersion;
this.xmlForm = xmlForm;
validateTotalDelay(steps);
}
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, Optional<Integer> majorVersion,
List<ChangeBlocker> changeBlockers, List<Step> steps, String xmlForm,
Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService,
Notifications notifications,
List<Endpoint> endpoints) {
this(List.of(new DeploymentInstanceSpec(InstanceName.from("default"),
steps,
upgradePolicy,
changeBlockers,
globalServiceId,
athenzDomain,
athenzService,
notifications,
endpoints)),
majorVersion,
xmlForm);
}
/** Adds missing required steps and reorders steps to a permissible order */
private static List<DeploymentSpec.Step> completeSteps(List<DeploymentSpec.Step> inputSteps) {
List<Step> steps = new ArrayList<>(inputSteps);
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new DeploymentSpec.DeclaredZone(Environment.staging));
}
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new DeploymentSpec.DeclaredZone(Environment.test));
}
DeploymentSpec.DeclaredZone testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
DeploymentSpec.DeclaredZone stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
* Removes the first occurrence of a deployment step to the given environment and returns it.
*
* @return the removed step, or null if it is not present
*/
private static DeploymentSpec.DeclaredZone remove(Environment environment, List<DeploymentSpec.Step> steps) {
for (int i = 0; i < steps.size(); i++) {
if ( ! (steps.get(i) instanceof DeploymentSpec.DeclaredZone)) continue;
DeploymentSpec.DeclaredZone zoneStep = (DeploymentSpec.DeclaredZone)steps.get(i);
if (zoneStep.environment() == environment) {
steps.remove(i);
return zoneStep;
}
}
return null;
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private void validateTotalDelay(List<Step> steps) {
long totalDelaySeconds = steps.stream().mapToLong(step -> (step.delay().getSeconds())).sum();
if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
" but max 24 hours is allowed");
}
/** Returns the sole instance of this spec, for the legacy single-instance API. Throws otherwise. */
private DeploymentInstanceSpec defaultInstance() {
    if ( ! singleInstance(steps))
        throw new IllegalArgumentException("This deployment spec does not support the legacy API " +
                                           "as it has multiple instances: " +
                                           instances().stream().map(Step::toString).collect(Collectors.joining(",")));
    return (DeploymentInstanceSpec) steps.get(0);
}
// Legacy single-instance accessors: each delegates to defaultInstance(), which throws
// IllegalArgumentException for specs declaring multiple instances.
public Optional<String> globalServiceId() { return defaultInstance().globalServiceId(); }
/** Returns the upgrade policy of the default instance */
public UpgradePolicy upgradePolicy() { return defaultInstance().upgradePolicy(); }
/** Returns the major version this application is pinned to, or empty (default) to allow all major versions */
public Optional<Integer> majorVersion() { return majorVersion; }
/** Returns whether a platform upgrade may start at the given instant */
public boolean canUpgradeAt(Instant instant) { return defaultInstance().canUpgradeAt(instant); }
/** Returns whether an application revision change may start at the given instant */
public boolean canChangeRevisionAt(Instant instant) { return defaultInstance().canChangeRevisionAt(instant); }
/** Returns the change blockers of the default instance */
public List<ChangeBlocker> changeBlocker() { return defaultInstance().changeBlocker(); }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() {
// A single-instance spec exposes that instance's steps directly
if (singleInstance(steps)) return defaultInstance().steps();
return steps;
}
/** Returns all zones the default instance deploys to, in step order */
public List<DeclaredZone> zones() {
return defaultInstance().steps().stream()
.flatMap(step -> step.zones().stream())
.collect(Collectors.toList());
}
public Optional<AthenzDomain> athenzDomain() { return defaultInstance().athenzDomain(); }
/** Returns the Athenz service to use in the given environment and region, if any */
public Optional<AthenzService> athenzService(Environment environment, RegionName region) {
return defaultInstance().athenzService(environment, region);
}
public Notifications notifications() { return defaultInstance().notifications(); }
public List<Endpoint> endpoints() { return defaultInstance().endpoints(); }
/** Returns the XML form of this spec, or null if it was not created by fromXml, nor is empty */
public String xmlForm() { return xmlForm; }
/** Returns whether the default instance deploys to the given environment, and (if present) region */
public boolean includes(Environment environment, Optional<RegionName> region) {
return defaultInstance().deploysTo(environment, region);
}
/** Returns whether the given steps consist of exactly one step, which is an instance */
private static boolean singleInstance(List<DeploymentSpec.Step> steps) {
    if (steps.size() != 1) return false;
    return steps.get(0) instanceof DeploymentInstanceSpec;
}
/** Returns the instance step containing the given instance name, or null if not present */
public DeploymentInstanceSpec instance(String name) {
    return instance(InstanceName.from(name));
}
/** Returns the instance step containing the given instance name, or throws an IllegalArgumentException if not present */
public DeploymentInstanceSpec requireInstance(String name) {
    return requireInstance(InstanceName.from(name));
}
/** Returns the instance step containing the given instance name, or throws an IllegalArgumentException if not present */
public DeploymentInstanceSpec requireInstance(InstanceName name) {
    DeploymentInstanceSpec instance = instance(name);
    if (instance == null)
        throw new IllegalArgumentException("No instance '" + name + "' in deployment.xml. Instances: " +
                                           instances().stream().map(spec -> spec.name().toString()).collect(Collectors.joining(",")));
    return instance;
}
/** Returns the steps of this which are instances */
public List<DeploymentInstanceSpec> instances() {
    List<DeploymentInstanceSpec> instanceSteps = new ArrayList<>();
    for (Step step : steps)
        if (step instanceof DeploymentInstanceSpec)
            instanceSteps.add((DeploymentInstanceSpec) step);
    return instanceSteps;
}
/**
 * Creates a deployment spec from XML read from the given reader.
 *
 * @throws IllegalArgumentException if the XML is invalid
 */
public static DeploymentSpec fromXml(Reader reader) {
return new DeploymentSpecXmlReader().read(reader);
}
/**
 * Creates a deployment spec from an XML string, with validation enabled.
 *
 * @throws IllegalArgumentException if the XML is invalid
 */
public static DeploymentSpec fromXml(String xmlForm) {
return fromXml(xmlForm, true);
}
/**
 * Creates a deployment spec from an XML string.
 *
 * @param validate whether the reader should validate the content
 * @throws IllegalArgumentException if the XML is invalid
 */
public static DeploymentSpec fromXml(String xmlForm, boolean validate) {
return new DeploymentSpecXmlReader(validate).read(xmlForm);
}
/**
 * Flattens a throwable and its cause chain into a single human-readable string.
 * Links with a null message are skipped, and a message equal to the previously
 * appended one is emitted only once; remaining messages are joined with ": ".
 */
public static String toMessageString(Throwable t) {
    StringBuilder result = new StringBuilder();
    String previous = null;
    for (Throwable current = t; current != null; current = current.getCause()) {
        String message = current.getMessage();
        if (message == null) continue;            // nothing to report for this link
        if (message.equals(previous)) continue;   // avoid repeating wrapped messages
        if (result.length() > 0) result.append(": ");
        result.append(message);
        previous = message;
    }
    return result.toString();
}
@Override
public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    DeploymentSpec other = (DeploymentSpec) o;
    // Identity is the pinned major version, the steps, and the raw XML form.
    // xmlForm may be null (see xmlForm()), so compare it null-safely.
    return majorVersion.equals(other.majorVersion) &&
           steps.equals(other.steps) &&
           Objects.equals(xmlForm, other.xmlForm);
}
@Override
public int hashCode() {
    // Consistent with equals: same three fields (Objects.hash tolerates null xmlForm)
    return Objects.hash(majorVersion, steps, xmlForm);
}
/**
 * This may be invoked by a continuous build.
 * Usage: DeploymentSpec [file] [environment] [region]?
 * Exits with 0 if the given zone is included in the spec, 1 otherwise (or on any error).
 */
public static void main(String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
// NOTE(review): FileReader uses the platform default charset — confirm spec files never need explicit UTF-8
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
// Region is optional; when absent any region matches
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
/** A deployment step */
public abstract static class Step {
/** Returns whether this step deploys to the given environment (in any region) */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}
/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);
/** Returns the zones deployed to in this step. This default implementation returns an empty list. */
public List<DeclaredZone> zones() { return Collections.emptyList(); }
/** The delay introduced by this step (beyond the time it takes to execute the step). Default is zero. */
public Duration delay() { return Duration.ZERO; }
/** Returns all the steps nested in this. This default implementation returns an empty list. */
public List<Step> steps() { return List.of(); }
}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {
private final Duration duration;
public Delay(Duration duration) {
this.duration = duration;
}
/** Returns the duration of this delay */
public Duration duration() { return duration; }
@Override
public Duration delay() { return duration; }
/** A delay deploys to no environment or region */
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }
@Override
public String toString() {
return "delay " + duration;
}
}
/** A deployment step which is to run deployment in a particular zone */
public static class DeclaredZone extends Step {
private final Environment environment;
// Empty for non-prod zones; always present for prod (enforced by the constructor)
private final Optional<RegionName> region;
private final boolean active;
private final Optional<AthenzService> athenzService;
private final Optional<String> testerFlavor;
public DeclaredZone(Environment environment) {
this(environment, Optional.empty(), false);
}
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
this(environment, region, active, Optional.empty(), Optional.empty());
}
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active, Optional<AthenzService> athenzService) {
this(environment, region, active, athenzService, Optional.empty());
}
/**
 * Creates a declared zone.
 *
 * @throws IllegalArgumentException if a region is given outside prod, or omitted for prod
 */
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active,
Optional<AthenzService> athenzService, Optional<String> testerFlavor) {
if (environment != Environment.prod && region.isPresent())
throw new IllegalArgumentException("Non-prod environments cannot specify a region");
if (environment == Environment.prod && region.isEmpty())
throw new IllegalArgumentException("Prod environments must be specified with a region")
this.environment = environment;
this.region = region;
this.active = active;
this.athenzService = athenzService;
this.testerFlavor = testerFlavor;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
public Optional<String> testerFlavor() { return testerFlavor; }
public Optional<AthenzService> athenzService() { return athenzService; }
@Override
public List<DeclaredZone> zones() { return Collections.singletonList(this); }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
if (environment != this.environment) return false;
if (region.isPresent() && ! region.equals(this.region)) return false;
return true;
}
// Identity is environment + region only; active, athenzService and testerFlavor are
// excluded from equals/hashCode — presumably deliberate (a zone is the same zone
// regardless of those settings), but confirm before relying on it
@Override
public int hashCode() {
return Objects.hash(environment, region);
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof DeclaredZone)) return false;
DeclaredZone other = (DeclaredZone)o;
if (this.environment != other.environment) return false;
if ( ! this.region.equals(other.region())) return false;
return true;
}
@Override
public String toString() {
return environment + (region.map(regionName -> "." + regionName).orElse(""));
}
}
/** A deployment step which is to run multiple steps (zones or instances) in parallel */
public static class ParallelZones extends Step {
private final List<Step> steps;
public ParallelZones(List<Step> steps) {
// Defensive, immutable copy of the nested steps
this.steps = List.copyOf(steps);
}
/** Returns the steps inside this which are zones */
@Override
public List<DeclaredZone> zones() {
return this.steps.stream()
.filter(step -> step instanceof DeclaredZone)
.map(DeclaredZone.class::cast)
.collect(Collectors.toList());
}
/** Returns all the steps nested in this */
@Override
public List<Step> steps() { return steps; }
/** Returns whether any nested step deploys to the given environment and (if present) region */
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
return steps().stream().anyMatch(zone -> zone.deploysTo(environment, region));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof ParallelZones)) return false;
ParallelZones that = (ParallelZones) o;
return Objects.equals(steps, that.steps);
}
@Override
public int hashCode() {
return Objects.hash(steps);
}
@Override
public String toString() {
return steps.size() + " parallel steps";
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
// (named defaultPolicy because 'default' is a reserved word in Java)
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
/** A blocking of changes in a given time window */
public static class ChangeBlocker {
// Whether application revision changes are blocked in the window
private final boolean revision;
// Whether version changes are blocked in the window
private final boolean version;
private final TimeWindow window;
public ChangeBlocker(boolean revision, boolean version, TimeWindow window) {
this.revision = revision;
this.version = version;
this.window = window;
}
public boolean blocksRevisions() { return revision; }
public boolean blocksVersions() { return version; }
/** Returns the time window in which this blocker applies */
public TimeWindow window() { return window; }
@Override
public String toString() {
return "change blocker revision=" + revision + " version=" + version + " window=" + window;
}
}
} |
It's redundant: the method which checks array equality starts by checking the length. | public void testThatPropertiesAreReencoded() throws Exception {
FastSearcher fastSearcher = createFastSearcher();
assertEquals(100, fastSearcher.getCacheControl().capacity());
Query query = new Query("?query=ignored&dispatch.summaries=false");
query.getRanking().setQueryCache(true);
Result result = doSearch(fastSearcher, query, 0, 10);
Execution execution = new Execution(chainedAsSearchChain(fastSearcher), Execution.Context.createContextStub());
assertEquals(2, result.getHitCount());
execution.fill(result);
BasicPacket receivedPacket = mockBackend.getChannel().getLastReceived();
ByteBuffer buf = ByteBuffer.allocate(1000);
receivedPacket.encode(buf);
buf.flip();
byte[] actual = new byte[buf.remaining()];
buf.get(actual);
SessionId sessionId = query.getSessionId(false);
byte IGNORE = 69;
ByteBuffer answer = ByteBuffer.allocate(1024);
answer.put(new byte[] { 0, 0, 0, (byte)(145+sessionId.asUtf8String().getByteLength()), 0, 0, 0, -37, 0, 0, 48, 17, 0, 0, 0, 0,
IGNORE, IGNORE, IGNORE, IGNORE,
7, 'd', 'e', 'f', 'a', 'u', 'l', 't', 0, 0, 0, 0x03,
0, 0, 0, 3,
0, 0, 0, 4, 'r', 'a', 'n', 'k', 0, 0, 0, 1, 0, 0, 0, 9, 's', 'e', 's', 's', 'i', 'o', 'n', 'I', 'd'});
answer.putInt(sessionId.asUtf8String().getBytes().length);
answer.put(sessionId.asUtf8String().getBytes());
answer.put(new byte [] {
0, 0, 0, 5, 'm', 'a', 't', 'c', 'h', 0, 0, 0, 1, 0, 0, 0, 24, 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'd', 'b', '.', 's', 'e', 'a', 'r', 'c', 'h', 'd', 'o', 'c', 't', 'y', 'p', 'e', 0, 0, 0, 4, 't', 'e', 's', 't',
0, 0, 0, 6, 'c', 'a', 'c', 'h', 'e', 's', 0, 0, 0, 1, 0, 0, 0, 5, 'q', 'u', 'e', 'r', 'y', 0, 0, 0, 4, 't', 'r', 'u', 'e',
0, 0, 0, 2});
byte [] expected = new byte [answer.position()];
answer.flip();
answer.get(expected);
for (int i = 0; i < expected.length; ++i) {
if (expected[i] == IGNORE) {
actual[i] = IGNORE;
}
}
assertArrayEquals(expected, actual);
} | public void testThatPropertiesAreReencoded() throws Exception {
FastSearcher fastSearcher = createFastSearcher();
assertEquals(100, fastSearcher.getCacheControl().capacity());
Query query = new Query("?query=ignored&dispatch.summaries=false");
query.getRanking().setQueryCache(true);
Result result = doSearch(fastSearcher, query, 0, 10);
Execution execution = new Execution(chainedAsSearchChain(fastSearcher), Execution.Context.createContextStub());
assertEquals(2, result.getHitCount());
execution.fill(result);
BasicPacket receivedPacket = mockBackend.getChannel().getLastReceived();
ByteBuffer buf = ByteBuffer.allocate(1000);
receivedPacket.encode(buf);
buf.flip();
byte[] actual = new byte[buf.remaining()];
buf.get(actual);
SessionId sessionId = query.getSessionId(false);
byte IGNORE = 69;
ByteBuffer answer = ByteBuffer.allocate(1024);
answer.put(new byte[] { 0, 0, 0, (byte)(145+sessionId.asUtf8String().getByteLength()), 0, 0, 0, -37, 0, 0, 48, 17, 0, 0, 0, 0,
IGNORE, IGNORE, IGNORE, IGNORE,
7, 'd', 'e', 'f', 'a', 'u', 'l', 't', 0, 0, 0, 0x03,
0, 0, 0, 3,
0, 0, 0, 4, 'r', 'a', 'n', 'k', 0, 0, 0, 1, 0, 0, 0, 9, 's', 'e', 's', 's', 'i', 'o', 'n', 'I', 'd'});
answer.putInt(sessionId.asUtf8String().getBytes().length);
answer.put(sessionId.asUtf8String().getBytes());
answer.put(new byte [] {
0, 0, 0, 5, 'm', 'a', 't', 'c', 'h', 0, 0, 0, 1, 0, 0, 0, 24, 'd', 'o', 'c', 'u', 'm', 'e', 'n', 't', 'd', 'b', '.', 's', 'e', 'a', 'r', 'c', 'h', 'd', 'o', 'c', 't', 'y', 'p', 'e', 0, 0, 0, 4, 't', 'e', 's', 't',
0, 0, 0, 6, 'c', 'a', 'c', 'h', 'e', 's', 0, 0, 0, 1, 0, 0, 0, 5, 'q', 'u', 'e', 'r', 'y', 0, 0, 0, 4, 't', 'r', 'u', 'e',
0, 0, 0, 2});
byte [] expected = new byte [answer.position()];
answer.flip();
answer.get(expected);
for (int i = 0; i < expected.length; ++i) {
if (expected[i] == IGNORE) {
actual[i] = IGNORE;
}
}
assertArrayEquals(expected, actual);
} | class FastSearcherTestCase {
// Shared empty documentdb config for tests that don't depend on document db setup
private final static DocumentdbInfoConfig documentdbInfoConfig = new DocumentdbInfoConfig(new DocumentdbInfoConfig.Builder());
// Backend mock captured by createFastSearcher() so tests can inspect the packets sent
private MockBackend mockBackend;
/** Verifies that raw backend relevance scores are passed through un-normalized */
@Test
public void testNoNormalizing() {
Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
new FS4ResourcePool(1),
new MockDispatcher(Collections.emptyList()),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
documentdbInfoConfig);
MockFSChannel.setEmptyDocsums(false);
assertEquals(100, fastSearcher.getCacheControl().capacity());
Result result = doSearch(fastSearcher, new Query("?query=ignored"), 0, 10);
// A normalized score would be <= 1; the mock backend returns large raw scores
assertTrue(result.hits().get(0).getRelevance().getScore() > 1000);
}
/** Verifies the error produced when the request contains no query */
@Test
public void testNullQuery() {
Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
new FS4ResourcePool(1),
new MockDispatcher(Collections.emptyList()),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
documentdbInfoConfig);
String query = "?junkparam=ignored";
Result result = doSearch(fastSearcher,new Query(query), 0, 10);
ErrorMessage message = result.hits().getError();
assertNotNull("Got error", message);
assertEquals("Null query", message.getMessage());
assertEquals(query, message.getDetailedMessage());
assertEquals(Error.NULL_QUERY.code, message.getCode());
}
/** Exercises summary filling via the backend vs. via the dispatcher, depending on query properties */
@Test
public void testDispatchDotSummaries() {
Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
DocumentdbInfoConfig documentdbConfigWithOneDb =
new DocumentdbInfoConfig(new DocumentdbInfoConfig.Builder().documentdb(new DocumentdbInfoConfig.Documentdb.Builder()
.name("testDb")
.summaryclass(new DocumentdbInfoConfig.Documentdb.Summaryclass.Builder().name("simple").id(7))
.rankprofile(new DocumentdbInfoConfig.Documentdb.Rankprofile.Builder()
.name("simpler").hasRankFeatures(false).hasSummaryFeatures(false))));
// Two-node cluster whose node connections are not actually backed by running services
List<SearchCluster.Node> nodes = new ArrayList<>();
nodes.add(new SearchCluster.Node("host1", 5000, 0));
nodes.add(new SearchCluster.Node("host2", 5000, 0));
MockFS4ResourcePool mockFs4ResourcePool = new MockFS4ResourcePool();
FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
mockFs4ResourcePool,
new MockDispatcher(nodes, mockFs4ResourcePool, 1, new VipStatus()),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
documentdbConfigWithOneDb);
{
// Plain summary request: served by the mock backend, so no error is expected
String query = "?query=sddocname:a&summary=simple";
Result result = doSearch(fastSearcher, new Query(query), 0, 10);
doFill(fastSearcher, result);
ErrorMessage error = result.hits().getError();
assertNull("Since we don't route to the dispatcher we hit the mock backend, so no error", error);
}
{
// ranking.queryCache routes filling through the dispatcher, whose rpc nodes are unreachable
String query = "?query=sddocname:a&ranking.queryCache";
Result result = doSearch(fastSearcher, new Query(query), 0, 10);
doFill(fastSearcher, result);
ErrorMessage error = result.hits().getError();
assertEquals("Since we don't actually run summary backends we get this error when the Dispatcher is used",
"Error response from rpc node connection to host1:0: Connection error", error.getDetailedMessage());
}
{
// Explicit dispatch.summaries also routes filling through the dispatcher
String query = "?query=sddocname:a&dispatch.summaries&summary=simple&ranking=simpler";
Result result = doSearch(fastSearcher, new Query(query), 0, 10);
doFill(fastSearcher, result);
ErrorMessage error = result.hits().getError();
assertEquals("Since we don't actually run summary backends we get this error when the Dispatcher is used",
"Error response from rpc node connection to host1:0: Connection error", error.getDetailedMessage());
}
}
/** Verifies the exact wire encoding of a query using model.restrict, against a golden byte array */
@Test
public void testQueryWithRestrict() {
mockBackend = new MockBackend();
DocumentdbInfoConfig documentdbConfigWithOneDb =
new DocumentdbInfoConfig(new DocumentdbInfoConfig.Builder().documentdb(new DocumentdbInfoConfig.Documentdb.Builder().name("testDb")));
FastSearcher fastSearcher = new FastSearcher(mockBackend,
new FS4ResourcePool(1),
new MockDispatcher(Collections.emptyList()),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
documentdbConfigWithOneDb);
Query query = new Query("?query=foo&model.restrict=testDb");
query.prepare();
Result result = doSearch(fastSearcher, query, 0, 10);
// Capture the query packet as sent to the backend and compare with the expected encoding;
// QueryTestCase.ignored marks byte positions excluded from the comparison
Packet receivedPacket = mockBackend.getChannel().getLastQueryPacket();
byte[] encoded = QueryTestCase.packetToBytes(receivedPacket);
byte[] correct = new byte[] {
0, 0, 0, 100, 0, 0, 0, -38, 0, 0, 0, 0, 0, 16, 0, 6, 0, 10,
QueryTestCase.ignored, QueryTestCase.ignored, QueryTestCase.ignored, QueryTestCase.ignored,
0, 0, 0x40, 0x03, 7, 100, 101, 102, 97, 117, 108, 116, 0, 0, 0, 1, 0, 0, 0, 5, 109, 97, 116, 99, 104, 0, 0, 0, 1, 0, 0, 0, 24, 100, 111, 99, 117, 109, 101, 110, 116, 100, 98, 46, 115, 101, 97, 114, 99, 104, 100, 111, 99, 116, 121, 112, 101, 0, 0, 0, 6, 116, 101, 115, 116, 68, 98, 0, 0, 0, 1, 0, 0, 0, 7, 68, 1, 0, 3, 102, 111, 111
};
QueryTestCase.assertEqualArrays(correct, encoded);
}
/**
 * Walks through search + fill cycles, asserting the expected cache state of hits after
 * each phase: cold cache, warm cache, cache cleared, and partial-window refills.
 */
@Test
public void testSearch() {
FastSearcher fastSearcher = createFastSearcher();
assertEquals(100, fastSearcher.getCacheControl().capacity());
// Phase 1: cold cache — nothing should be marked cached
Result result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 10);
Execution execution = new Execution(chainedAsSearchChain(fastSearcher), Execution.Context.createContextStub());
assertEquals(2, result.getHitCount());
execution.fill(result);
assertCorrectHit1((FastHit) result.hits().get(0));
assertCorrectTypes1((FastHit) result.hits().get(0));
for (int idx = 0; idx < result.getHitCount(); idx++) {
assertTrue(!result.hits().get(idx).isCached());
}
// Phase 2: same query again — all hits should now come from the cache
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 10);
assertEquals(2, result.getHitCount());
execution.fill(result);
assertCorrectHit1((FastHit) result.hits().get(0));
for (int i = 0; i < result.getHitCount(); i++) {
assertTrue(result.hits().get(i) + " should be cached",
result.hits().get(i).isCached());
}
// A window beyond the available hits, then the original window again
result = doSearch(fastSearcher,new Query("?query=ignored"), 6, 3);
execution.fill(result);
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 10);
assertEquals(2, result.getHitCount());
assertCorrectHit1((FastHit) result.hits().get(0));
assertTrue("All hits are cached and the result knows it",
result.isCached());
for (int i = 0; i < result.getHitCount(); i++) {
assertTrue(result.hits().get(i) + " should be cached",
result.hits().get(i).isCached());
}
// Phase 3: after clearing, nothing is cached any more
clearCache(fastSearcher);
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 10);
assertEquals(2, result.getHitCount());
execution.fill(result);
assertCorrectHit1((FastHit) result.hits().get(0));
assertTrue("All hits are not cached", !result.isCached());
for (int i = 0; i < result.getHitCount(); i++) {
assertTrue(!result.hits().get(i).isCached());
}
// Phase 4: growing the window caches hits incrementally
clearCache(fastSearcher);
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 1);
assertEquals(1, result.getConcreteHitCount());
execution.fill(result);
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2);
assertEquals(2, result.getConcreteHitCount());
execution.fill(result);
assertTrue(result.hits().get(0).isCached());
assertFalse(result.hits().get(1).isCached());
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2);
assertEquals(2, result.getConcreteHitCount());
execution.fill(result);
assertTrue(result.hits().get(0).isCached());
assertTrue(result.hits().get(1).isCached());
// Phase 5: disjoint windows after a clear do not reuse cached hits
clearCache(fastSearcher);
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 1);
assertEquals(1, result.getConcreteHitCount());
result = doSearch(fastSearcher,new Query("?query=ignored"), 1, 1);
assertEquals(1, result.getConcreteHitCount());
for (int i = 0; i < result.getHitCount(); i++) {
assertFalse("Hit " + i + " should not be cached.",
result.hits().get(i).isCached());
}
}
/** Wraps the given searcher as a one-element search chain */
private Chain<Searcher> chainedAsSearchChain(Searcher topOfChain) {
    List<Searcher> chainMembers = new ArrayList<>();
    chainMembers.add(topOfChain);
    return new Chain<>(chainMembers);
}
/** Runs the given query, with the given window applied, through a chain containing only the given searcher */
private Result doSearch(Searcher searcher, Query query, int offset, int hits) {
    query.setOffset(offset);
    query.setHits(hits);
    Execution execution = createExecution(searcher);
    return execution.search(query);
}
/** Creates an execution over a one-searcher chain, with a minimal context (no registries but renderers) */
private Execution createExecution(Searcher searcher) {
Execution.Context context = new Execution.Context(null, null, null, new RendererRegistry(Collections.emptyList()), new SimpleLinguistics());
return new Execution(chainedAsSearchChain(searcher), context);
}
/** Fills the given result through a fresh execution over the given searcher */
private void doFill(Searcher searcher, Result result) {
createExecution(searcher).fill(result);
}
/**
 * Creates a FastSearcher backed by a fresh MockBackend (stored in {@link #mockBackend}
 * for later inspection) and the documentdb config read from the test resource file.
 *
 * Note: the stray {@code @Test} annotation previously on this method was removed —
 * JUnit 4 requires test methods to be public, void and parameterless, so annotating
 * a private, value-returning helper is invalid.
 */
private FastSearcher createFastSearcher() {
    mockBackend = new MockBackend();
    ConfigGetter<DocumentdbInfoConfig> getter = new ConfigGetter<>(DocumentdbInfoConfig.class);
    DocumentdbInfoConfig config = getter.getConfig("file:src/test/java/com/yahoo/prelude/fastsearch/test/documentdb-info.cfg");
    MockFSChannel.resetDocstamp();
    Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
    return new FastSearcher(mockBackend,
                            new FS4ResourcePool(1),
                            new MockDispatcher(Collections.emptyList()),
                            new SummaryParameters(null),
                            new ClusterParams("testhittype"),
                            new CacheParams(100, 1e64),
                            config);
}
// NOTE(review): this method has @Ignore but no @Test, so JUnit 4 will not pick it up at all;
// if it is ever meant to run again it also needs @Test
@Ignore
public void testSinglePhaseCachedSupersets() {
Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
MockFSChannel.resetDocstamp();
FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
new FS4ResourcePool(1),
new MockDispatcher(Collections.emptyList()),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
documentdbInfoConfig);
CacheControl c = fastSearcher.getCacheControl();
// Populate the cache with a 2-hit window, then look it up directly via its key
Result result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2);
Query q = new Query("?query=ignored");
((WordItem) q.getModel().getQueryTree().getRoot()).setUniqueID(1);
QueryPacket queryPacket = QueryPacket.create(q);
CacheKey k = new CacheKey(queryPacket);
PacketWrapper p = c.lookup(k, q);
assertEquals(1, p.getResultPackets().size());
// Sub-windows of the cached window should be served from the same cached packet
result = doSearch(fastSearcher,new Query("?query=ignored"), 1, 1);
p = c.lookup(k, q);
assertEquals(1, p.getResultPackets().size());
assertEquals(1, result.getConcreteHitCount());
for (int i = 0; i < result.getHitCount(); i++) {
assertTrue(result.hits().get(i).isCached());
}
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 1);
p = c.lookup(k, q);
assertEquals(1, p.getResultPackets().size());
assertEquals(1, result.getConcreteHitCount());
for (int i = 0; i < result.getHitCount(); i++) {
assertTrue(result.hits().get(i).isCached());
}
}
/** Sub-windows of a cached result are served from cache, but remain unfilled until fill() */
@Test
public void testMultiPhaseCachedSupersets() {
Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
MockFSChannel.resetDocstamp();
FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
new FS4ResourcePool(1),
new MockDispatcher(Collections.emptyList()),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
documentdbInfoConfig);
Result result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2);
result = doSearch(fastSearcher,new Query("?query=ignored"), 1, 1);
assertEquals(1, result.getConcreteHitCount());
for (int i = 0; i < result.getHitCount(); i++) {
assertTrue(result.hits().get(i).isCached());
if (!result.hits().get(i).isMeta()) {
assertTrue(result.hits().get(i).getFilled().isEmpty());
}
}
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 1);
assertEquals(1, result.getConcreteHitCount());
for (int i = 0; i < result.getHitCount(); i++) {
assertTrue(result.hits().get(i).isCached());
if (!result.hits().get(i).isMeta()) {
assertTrue(result.hits().get(i).getFilled().isEmpty());
}
}
}
/** With a single-node group, grouping should be forced into a single pass after searching */
@Test
public void testSinglePassGroupingIsForcedWithSingleNodeGroups() {
FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
new FS4ResourcePool(1),
new MockDispatcher(new SearchCluster.Node("host0", 123, 0)),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
documentdbInfoConfig);
Query q = new Query("?query=foo");
// Attach two grouping requests to the query: a flat one and one with nested children
GroupingRequest request1 = GroupingRequest.newInstance(q);
request1.setRootOperation(new AllOperation());
GroupingRequest request2 = GroupingRequest.newInstance(q);
AllOperation all = new AllOperation();
all.addChild(new EachOperation());
all.addChild(new EachOperation());
request2.setRootOperation(all);
assertForceSinglePassIs(false, q);
fastSearcher.search(q, new Execution(Execution.Context.createContextStub()));
assertForceSinglePassIs(true, q);
}
// NOTE(review): the name says "WithSingleNodeGroups" but this builds a two-node dispatcher —
// the name presumably should say multi-node; confirm before renaming
@Test
public void testSinglePassGroupingIsNotForcedWithSingleNodeGroups() {
MockDispatcher dispatcher =
new MockDispatcher(ImmutableList.of(new SearchCluster.Node("host0", 123, 0),
new SearchCluster.Node("host1", 123, 0)));
FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
new FS4ResourcePool(1),
dispatcher,
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
documentdbInfoConfig);
Query q = new Query("?query=foo");
GroupingRequest request1 = GroupingRequest.newInstance(q);
request1.setRootOperation(new AllOperation());
GroupingRequest request2 = GroupingRequest.newInstance(q);
AllOperation all = new AllOperation();
all.addChild(new EachOperation());
all.addChild(new EachOperation());
request2.setRootOperation(all);
assertForceSinglePassIs(false, q);
fastSearcher.search(q, new Execution(Execution.Context.createContextStub()));
assertForceSinglePassIs(false, q);
}
/** Asserts the force-single-pass flag on every grouping request attached to the query */
private void assertForceSinglePassIs(boolean expected, Query query) {
for (GroupingRequest request : GroupingRequest.getRequests(query))
assertForceSinglePassIs(expected, request.getRootOperation());
}
/** Recursively asserts the force-single-pass flag on an operation and all its children */
private void assertForceSinglePassIs(boolean expected, GroupingOperation operation) {
assertEquals("Force single pass is " + expected + " in " + operation,
expected, operation.getForceSinglePass());
for (GroupingOperation child : operation.getChildren())
assertForceSinglePassIs(expected, child);
}
/** Pings a real (mock) server over a socket and checks the pong and its docstamp */
@Test
public void testPing() throws IOException, InterruptedException {
Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
BackendTestCase.MockServer server = new BackendTestCase.MockServer();
FS4ResourcePool listeners = new FS4ResourcePool(new Fs4Config(new Fs4Config.Builder()));
Backend backend = listeners.getBackend(server.host.getHostString(),server.host.getPort());
FastSearcher fastSearcher = new FastSearcher(backend,
new FS4ResourcePool(1),
new MockDispatcher(Collections.emptyList()),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(0, 0.0d),
documentdbInfoConfig);
server.dispatch.packetData = BackendTestCase.PONG;
server.dispatch.setNoChannel();
Chain<Searcher> chain = new Chain<>(fastSearcher);
Execution e = new Execution(chain, Execution.Context.createContextStub());
Pong pong = e.ping(new Ping());
assertTrue(pong.getPongPacket().isPresent());
assertEquals(127, pong.getPongPacket().get().getDocstamp());
// Tear down the socket resources before the final assertions
backend.shutdown();
server.dispatch.socket.close();
server.dispatch.connection.close();
server.worker.join();
pong.setPingInfo("blbl");
assertEquals("Result of pinging using blbl", pong.toString());
}
/** Empties the searcher's packet cache */
private void clearCache(FastSearcher fastSearcher) {
fastSearcher.getCacheControl().clear();
}
/** Asserts the decoded field types of the first mock hit */
private void assertCorrectTypes1(FastHit hit) {
assertEquals(String.class, hit.getField("TITLE").getClass());
assertEquals(Integer.class, hit.getField("BYTES").getClass());
}
/** Asserts the decoded field values, relevance and id of the first mock hit */
private void assertCorrectHit1(FastHit hit) {
assertEquals(
"StudyOfMadonna.com - Interviews, Articles, Reviews, Quotes, Essays and more..",
hit.getField("TITLE"));
assertEquals("352", hit.getField("WORDS").toString());
assertEquals(2003., hit.getRelevance().getScore(), 0.01d);
assertEquals("index:0/234/0/" + FastHit.asHexString(hit.getGlobalId()), hit.getId().toString());
assertEquals("9190", hit.getField("BYTES").toString());
assertEquals("testhittype", hit.getSource());
}
/** A null summary class should still be visible in the fill trace */
@Test
public void null_summary_is_included_in_trace() {
String summary = null;
assertThat(getTraceString(summary), containsString("summary=[null]"));
}
/** A concrete summary class should be quoted in the fill trace */
@Test
public void non_null_summary_is_included_in_trace() {
String summary = "all";
assertThat(getTraceString(summary), containsString("summary='all'"));
}
/**
 * Runs a search + fill with the given summary class at trace level 2 and returns
 * the trace message containing "fill to dispatch", or null if none was produced.
 */
private String getTraceString(String summary) {
FastSearcher fastSearcher = createFastSearcher();
Query query = new Query("?query=ignored");
query.getPresentation().setSummary(summary);
query.setTraceLevel(2);
Result result = doSearch(fastSearcher, query, 0, 10);
doFill(fastSearcher, result);
Trace trace = query.getContext(false).getTrace();
// Visit the trace tree, capturing the last matching string payload
final AtomicReference<String> fillTraceString = new AtomicReference<>();
trace.traceNode().accept(new TraceVisitor() {
@Override
public void visit(TraceNode traceNode) {
if (traceNode.payload() instanceof String && traceNode.payload().toString().contains("fill to dispatch"))
fillTraceString.set((String) traceNode.payload());
}
});
return fillTraceString.get();
}
} | class FastSearcherTestCase {
private final static DocumentdbInfoConfig documentdbInfoConfig = new DocumentdbInfoConfig(new DocumentdbInfoConfig.Builder());
private MockBackend mockBackend;
@Test
public void testNoNormalizing() {
Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
new FS4ResourcePool(1),
new MockDispatcher(Collections.emptyList()),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
documentdbInfoConfig);
MockFSChannel.setEmptyDocsums(false);
assertEquals(100, fastSearcher.getCacheControl().capacity());
Result result = doSearch(fastSearcher, new Query("?query=ignored"), 0, 10);
assertTrue(result.hits().get(0).getRelevance().getScore() > 1000);
}
@Test
public void testNullQuery() {
Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
new FS4ResourcePool(1),
new MockDispatcher(Collections.emptyList()),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
documentdbInfoConfig);
String query = "?junkparam=ignored";
Result result = doSearch(fastSearcher,new Query(query), 0, 10);
ErrorMessage message = result.hits().getError();
assertNotNull("Got error", message);
assertEquals("Null query", message.getMessage());
assertEquals(query, message.getDetailedMessage());
assertEquals(Error.NULL_QUERY.code, message.getCode());
}
@Test
public void testDispatchDotSummaries() {
Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
DocumentdbInfoConfig documentdbConfigWithOneDb =
new DocumentdbInfoConfig(new DocumentdbInfoConfig.Builder().documentdb(new DocumentdbInfoConfig.Documentdb.Builder()
.name("testDb")
.summaryclass(new DocumentdbInfoConfig.Documentdb.Summaryclass.Builder().name("simple").id(7))
.rankprofile(new DocumentdbInfoConfig.Documentdb.Rankprofile.Builder()
.name("simpler").hasRankFeatures(false).hasSummaryFeatures(false))));
List<SearchCluster.Node> nodes = new ArrayList<>();
nodes.add(new SearchCluster.Node("host1", 5000, 0));
nodes.add(new SearchCluster.Node("host2", 5000, 0));
MockFS4ResourcePool mockFs4ResourcePool = new MockFS4ResourcePool();
FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
mockFs4ResourcePool,
new MockDispatcher(nodes, mockFs4ResourcePool, 1, new VipStatus()),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
documentdbConfigWithOneDb);
{
String query = "?query=sddocname:a&summary=simple";
Result result = doSearch(fastSearcher, new Query(query), 0, 10);
doFill(fastSearcher, result);
ErrorMessage error = result.hits().getError();
assertNull("Since we don't route to the dispatcher we hit the mock backend, so no error", error);
}
{
String query = "?query=sddocname:a&ranking.queryCache";
Result result = doSearch(fastSearcher, new Query(query), 0, 10);
doFill(fastSearcher, result);
ErrorMessage error = result.hits().getError();
assertEquals("Since we don't actually run summary backends we get this error when the Dispatcher is used",
"Error response from rpc node connection to host1:0: Connection error", error.getDetailedMessage());
}
{
String query = "?query=sddocname:a&dispatch.summaries&summary=simple&ranking=simpler";
Result result = doSearch(fastSearcher, new Query(query), 0, 10);
doFill(fastSearcher, result);
ErrorMessage error = result.hits().getError();
assertEquals("Since we don't actually run summary backends we get this error when the Dispatcher is used",
"Error response from rpc node connection to host1:0: Connection error", error.getDetailedMessage());
}
}
@Test
public void testQueryWithRestrict() {
mockBackend = new MockBackend();
DocumentdbInfoConfig documentdbConfigWithOneDb =
new DocumentdbInfoConfig(new DocumentdbInfoConfig.Builder().documentdb(new DocumentdbInfoConfig.Documentdb.Builder().name("testDb")));
FastSearcher fastSearcher = new FastSearcher(mockBackend,
new FS4ResourcePool(1),
new MockDispatcher(Collections.emptyList()),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
documentdbConfigWithOneDb);
Query query = new Query("?query=foo&model.restrict=testDb");
query.prepare();
Result result = doSearch(fastSearcher, query, 0, 10);
Packet receivedPacket = mockBackend.getChannel().getLastQueryPacket();
byte[] encoded = QueryTestCase.packetToBytes(receivedPacket);
byte[] correct = new byte[] {
0, 0, 0, 100, 0, 0, 0, -38, 0, 0, 0, 0, 0, 16, 0, 6, 0, 10,
QueryTestCase.ignored, QueryTestCase.ignored, QueryTestCase.ignored, QueryTestCase.ignored,
0, 0, 0x40, 0x03, 7, 100, 101, 102, 97, 117, 108, 116, 0, 0, 0, 1, 0, 0, 0, 5, 109, 97, 116, 99, 104, 0, 0, 0, 1, 0, 0, 0, 24, 100, 111, 99, 117, 109, 101, 110, 116, 100, 98, 46, 115, 101, 97, 114, 99, 104, 100, 111, 99, 116, 121, 112, 101, 0, 0, 0, 6, 116, 101, 115, 116, 68, 98, 0, 0, 0, 1, 0, 0, 0, 7, 68, 1, 0, 3, 102, 111, 111
};
QueryTestCase.assertEqualArrays(correct, encoded);
}
@Test
public void testSearch() {
FastSearcher fastSearcher = createFastSearcher();
assertEquals(100, fastSearcher.getCacheControl().capacity());
Result result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 10);
Execution execution = new Execution(chainedAsSearchChain(fastSearcher), Execution.Context.createContextStub());
assertEquals(2, result.getHitCount());
execution.fill(result);
assertCorrectHit1((FastHit) result.hits().get(0));
assertCorrectTypes1((FastHit) result.hits().get(0));
for (int idx = 0; idx < result.getHitCount(); idx++) {
assertTrue(!result.hits().get(idx).isCached());
}
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 10);
assertEquals(2, result.getHitCount());
execution.fill(result);
assertCorrectHit1((FastHit) result.hits().get(0));
for (int i = 0; i < result.getHitCount(); i++) {
assertTrue(result.hits().get(i) + " should be cached",
result.hits().get(i).isCached());
}
result = doSearch(fastSearcher,new Query("?query=ignored"), 6, 3);
execution.fill(result);
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 10);
assertEquals(2, result.getHitCount());
assertCorrectHit1((FastHit) result.hits().get(0));
assertTrue("All hits are cached and the result knows it",
result.isCached());
for (int i = 0; i < result.getHitCount(); i++) {
assertTrue(result.hits().get(i) + " should be cached",
result.hits().get(i).isCached());
}
clearCache(fastSearcher);
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 10);
assertEquals(2, result.getHitCount());
execution.fill(result);
assertCorrectHit1((FastHit) result.hits().get(0));
assertTrue("All hits are not cached", !result.isCached());
for (int i = 0; i < result.getHitCount(); i++) {
assertTrue(!result.hits().get(i).isCached());
}
clearCache(fastSearcher);
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 1);
assertEquals(1, result.getConcreteHitCount());
execution.fill(result);
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2);
assertEquals(2, result.getConcreteHitCount());
execution.fill(result);
assertTrue(result.hits().get(0).isCached());
assertFalse(result.hits().get(1).isCached());
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2);
assertEquals(2, result.getConcreteHitCount());
execution.fill(result);
assertTrue(result.hits().get(0).isCached());
assertTrue(result.hits().get(1).isCached());
clearCache(fastSearcher);
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 1);
assertEquals(1, result.getConcreteHitCount());
result = doSearch(fastSearcher,new Query("?query=ignored"), 1, 1);
assertEquals(1, result.getConcreteHitCount());
for (int i = 0; i < result.getHitCount(); i++) {
assertFalse("Hit " + i + " should not be cached.",
result.hits().get(i).isCached());
}
}
private Chain<Searcher> chainedAsSearchChain(Searcher topOfChain) {
List<Searcher> searchers = new ArrayList<>();
searchers.add(topOfChain);
return new Chain<>(searchers);
}
private Result doSearch(Searcher searcher, Query query, int offset, int hits) {
query.setOffset(offset);
query.setHits(hits);
return createExecution(searcher).search(query);
}
private Execution createExecution(Searcher searcher) {
Execution.Context context = new Execution.Context(null, null, null, new RendererRegistry(Collections.emptyList()), new SimpleLinguistics());
return new Execution(chainedAsSearchChain(searcher), context);
}
private void doFill(Searcher searcher, Result result) {
createExecution(searcher).fill(result);
}
@Test
private FastSearcher createFastSearcher() {
mockBackend = new MockBackend();
ConfigGetter<DocumentdbInfoConfig> getter = new ConfigGetter<>(DocumentdbInfoConfig.class);
DocumentdbInfoConfig config = getter.getConfig("file:src/test/java/com/yahoo/prelude/fastsearch/test/documentdb-info.cfg");
MockFSChannel.resetDocstamp();
Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
return new FastSearcher(mockBackend,
new FS4ResourcePool(1),
new MockDispatcher(Collections.emptyList()),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
config);
}
@Ignore
public void testSinglePhaseCachedSupersets() {
Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
MockFSChannel.resetDocstamp();
FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
new FS4ResourcePool(1),
new MockDispatcher(Collections.emptyList()),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
documentdbInfoConfig);
CacheControl c = fastSearcher.getCacheControl();
Result result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2);
Query q = new Query("?query=ignored");
((WordItem) q.getModel().getQueryTree().getRoot()).setUniqueID(1);
QueryPacket queryPacket = QueryPacket.create(q);
CacheKey k = new CacheKey(queryPacket);
PacketWrapper p = c.lookup(k, q);
assertEquals(1, p.getResultPackets().size());
result = doSearch(fastSearcher,new Query("?query=ignored"), 1, 1);
p = c.lookup(k, q);
assertEquals(1, p.getResultPackets().size());
assertEquals(1, result.getConcreteHitCount());
for (int i = 0; i < result.getHitCount(); i++) {
assertTrue(result.hits().get(i).isCached());
}
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 1);
p = c.lookup(k, q);
assertEquals(1, p.getResultPackets().size());
assertEquals(1, result.getConcreteHitCount());
for (int i = 0; i < result.getHitCount(); i++) {
assertTrue(result.hits().get(i).isCached());
}
}
@Test
public void testMultiPhaseCachedSupersets() {
Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
MockFSChannel.resetDocstamp();
FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
new FS4ResourcePool(1),
new MockDispatcher(Collections.emptyList()),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
documentdbInfoConfig);
Result result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2);
result = doSearch(fastSearcher,new Query("?query=ignored"), 1, 1);
assertEquals(1, result.getConcreteHitCount());
for (int i = 0; i < result.getHitCount(); i++) {
assertTrue(result.hits().get(i).isCached());
if (!result.hits().get(i).isMeta()) {
assertTrue(result.hits().get(i).getFilled().isEmpty());
}
}
result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 1);
assertEquals(1, result.getConcreteHitCount());
for (int i = 0; i < result.getHitCount(); i++) {
assertTrue(result.hits().get(i).isCached());
if (!result.hits().get(i).isMeta()) {
assertTrue(result.hits().get(i).getFilled().isEmpty());
}
}
}
@Test
public void testSinglePassGroupingIsForcedWithSingleNodeGroups() {
FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
new FS4ResourcePool(1),
new MockDispatcher(new SearchCluster.Node("host0", 123, 0)),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
documentdbInfoConfig);
Query q = new Query("?query=foo");
GroupingRequest request1 = GroupingRequest.newInstance(q);
request1.setRootOperation(new AllOperation());
GroupingRequest request2 = GroupingRequest.newInstance(q);
AllOperation all = new AllOperation();
all.addChild(new EachOperation());
all.addChild(new EachOperation());
request2.setRootOperation(all);
assertForceSinglePassIs(false, q);
fastSearcher.search(q, new Execution(Execution.Context.createContextStub()));
assertForceSinglePassIs(true, q);
}
@Test
public void testSinglePassGroupingIsNotForcedWithSingleNodeGroups() {
MockDispatcher dispatcher =
new MockDispatcher(ImmutableList.of(new SearchCluster.Node("host0", 123, 0),
new SearchCluster.Node("host1", 123, 0)));
FastSearcher fastSearcher = new FastSearcher(new MockBackend(),
new FS4ResourcePool(1),
dispatcher,
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(100, 1e64),
documentdbInfoConfig);
Query q = new Query("?query=foo");
GroupingRequest request1 = GroupingRequest.newInstance(q);
request1.setRootOperation(new AllOperation());
GroupingRequest request2 = GroupingRequest.newInstance(q);
AllOperation all = new AllOperation();
all.addChild(new EachOperation());
all.addChild(new EachOperation());
request2.setRootOperation(all);
assertForceSinglePassIs(false, q);
fastSearcher.search(q, new Execution(Execution.Context.createContextStub()));
assertForceSinglePassIs(false, q);
}
private void assertForceSinglePassIs(boolean expected, Query query) {
for (GroupingRequest request : GroupingRequest.getRequests(query))
assertForceSinglePassIs(expected, request.getRootOperation());
}
private void assertForceSinglePassIs(boolean expected, GroupingOperation operation) {
assertEquals("Force single pass is " + expected + " in " + operation,
expected, operation.getForceSinglePass());
for (GroupingOperation child : operation.getChildren())
assertForceSinglePassIs(expected, child);
}
@Test
public void testPing() throws IOException, InterruptedException {
Logger.getLogger(FastSearcher.class.getName()).setLevel(Level.ALL);
BackendTestCase.MockServer server = new BackendTestCase.MockServer();
FS4ResourcePool listeners = new FS4ResourcePool(new Fs4Config(new Fs4Config.Builder()));
Backend backend = listeners.getBackend(server.host.getHostString(),server.host.getPort());
FastSearcher fastSearcher = new FastSearcher(backend,
new FS4ResourcePool(1),
new MockDispatcher(Collections.emptyList()),
new SummaryParameters(null),
new ClusterParams("testhittype"),
new CacheParams(0, 0.0d),
documentdbInfoConfig);
server.dispatch.packetData = BackendTestCase.PONG;
server.dispatch.setNoChannel();
Chain<Searcher> chain = new Chain<>(fastSearcher);
Execution e = new Execution(chain, Execution.Context.createContextStub());
Pong pong = e.ping(new Ping());
assertTrue(pong.getPongPacket().isPresent());
assertEquals(127, pong.getPongPacket().get().getDocstamp());
backend.shutdown();
server.dispatch.socket.close();
server.dispatch.connection.close();
server.worker.join();
pong.setPingInfo("blbl");
assertEquals("Result of pinging using blbl", pong.toString());
}
private void clearCache(FastSearcher fastSearcher) {
fastSearcher.getCacheControl().clear();
}
private void assertCorrectTypes1(FastHit hit) {
assertEquals(String.class, hit.getField("TITLE").getClass());
assertEquals(Integer.class, hit.getField("BYTES").getClass());
}
private void assertCorrectHit1(FastHit hit) {
assertEquals(
"StudyOfMadonna.com - Interviews, Articles, Reviews, Quotes, Essays and more..",
hit.getField("TITLE"));
assertEquals("352", hit.getField("WORDS").toString());
assertEquals(2003., hit.getRelevance().getScore(), 0.01d);
assertEquals("index:0/234/0/" + FastHit.asHexString(hit.getGlobalId()), hit.getId().toString());
assertEquals("9190", hit.getField("BYTES").toString());
assertEquals("testhittype", hit.getSource());
}
@Test
public void null_summary_is_included_in_trace() {
String summary = null;
assertThat(getTraceString(summary), containsString("summary=[null]"));
}
@Test
public void non_null_summary_is_included_in_trace() {
String summary = "all";
assertThat(getTraceString(summary), containsString("summary='all'"));
}
private String getTraceString(String summary) {
FastSearcher fastSearcher = createFastSearcher();
Query query = new Query("?query=ignored");
query.getPresentation().setSummary(summary);
query.setTraceLevel(2);
Result result = doSearch(fastSearcher, query, 0, 10);
doFill(fastSearcher, result);
Trace trace = query.getContext(false).getTrace();
final AtomicReference<String> fillTraceString = new AtomicReference<>();
trace.traceNode().accept(new TraceVisitor() {
@Override
public void visit(TraceNode traceNode) {
if (traceNode.payload() instanceof String && traceNode.payload().toString().contains("fill to dispatch"))
fillTraceString.set((String) traceNode.payload());
}
});
return fillTraceString.get();
}
} | |
This test can be simplified to ``` assertResultingDocumentSelection("group/abc?continuation=abc", "id.group=='abc'"); ``` ? | public void testUseExpressionOnVisit() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString(visit_test_response_selection_rewrite));
} | Request request = new Request("http: | public void testUseExpressionOnVisit() throws Exception {
assertResultingDocumentSelection("group/abc?continuation=xyz", "id.group=='abc'");
} | class RestApiTest {
Application application;
@Before
public void setup() throws Exception {
application = Application.fromApplicationPackage(Paths.get("src/test/rest-api-application"), Networking.enable);
}
@After
public void tearDown() throws Exception {
application.close();
}
String post_test_uri = "/document/v1/namespace/testdocument/docid/c";
String post_test_doc = "{\n" +
"\"foo\" : \"bar\"," +
"\"fields\": {\n" +
"\"title\": \"This is the title\",\n" +
"\"body\": \"This is the body\"" +
"}" +
"}";
String post_test_response = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Ignore
@Test
public void blockingTest() throws Exception {
System.out.println("Running on port " + getFirstListenPort());
Thread.sleep(Integer.MAX_VALUE);
}
@Test
public void testbasicPost() throws Exception {
Request request = new Request("http:
HttpPost httpPost = new HttpPost(request.getUri());
StringEntity entity = new StringEntity(post_test_doc, ContentType.create("application/json"));
httpPost.setEntity(entity);
String x = doRest(httpPost);
assertThat(x, is(post_test_response));
}
String post_test_uri_cond = "/document/v1/namespace/testdocument/docid/c?condition=foo";
String post_test_doc_cond = "{\n" +
"\"foo\" : \"bar\"," +
"\"fields\": {\n" +
"\"title\": \"This is the title\",\n" +
"\"body\": \"This is the body\"" +
"}" +
"}";
String post_test_response_cond = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Test
public void testConditionalPost() throws Exception {
Request request = new Request("http:
HttpPost httpPost = new HttpPost(request.getUri());
StringEntity entity = new StringEntity(post_test_doc_cond, ContentType.create("application/json"));
httpPost.setEntity(entity);
String x = doRest(httpPost);
assertThat(x, is(post_test_response_cond));
}
@Test
public void testEmptyPost() throws Exception {
Request request = new Request("http:
HttpPost httpPost = new HttpPost(request.getUri());
StringEntity entity = new StringEntity("", ContentType.create("application/json"));
httpPost.setEntity(entity);
String x = doRest(httpPost);
assertThat(x, containsString("Could not read document, no document?"));
}
String update_test_uri = "/document/v1/namespace/testdocument/docid/c";
String update_test_doc = "{\n" +
"\t\"fields\": {\n" +
"\"title\": {\n" +
"\"assign\": \"Oh lala\"\n" +
"}\n" +
"}\n" +
"}\n";
String update_test_response = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Test
public void testbasicUpdate() throws Exception {
Request request = new Request("http:
HttpPut httpPut = new HttpPut(request.getUri());
StringEntity entity = new StringEntity(update_test_doc, ContentType.create("application/json"));
httpPut.setEntity(entity);
assertThat(doRest(httpPut), is(update_test_response));
assertThat(getLog(), not(containsString("CREATE IF NON EXISTING IS TRUE")));
}
@Test
public void testbasicUpdateCreateTrue() throws Exception {
Request request = new Request("http:
HttpPut httpPut = new HttpPut(request.getUri());
StringEntity entity = new StringEntity(update_test_doc, ContentType.create("application/json"));
httpPut.setEntity(entity);
assertThat(doRest(httpPut), is(update_test_response));
assertThat(getLog(), containsString("CREATE IF NON EXISTENT IS TRUE"));
}
String update_test_create_if_non_existient_uri = "/document/v1/namespace/testdocument/docid/c";
String update_test_create_if_non_existient_doc = "{\n" +
"\"create\":true," +
"\t\"fields\": {\n" +
"\"title\": {\n" +
"\"assign\": \"Oh lala\"\n" +
"}\n" +
"}\n" +
"}\n";
String update_test_create_if_non_existing_response = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Test
public void testCreateIfNonExistingUpdateInDocTrue() throws Exception {
Request request = new Request("http:
HttpPut httpPut = new HttpPut(request.getUri());
StringEntity entity = new StringEntity(update_test_create_if_non_existient_doc, ContentType.create("application/json"));
httpPut.setEntity(entity);
assertThat(doRest(httpPut), is(update_test_create_if_non_existing_response));
assertThat(getLog(), containsString("CREATE IF NON EXISTENT IS TRUE"));
}
@Test
public void testCreateIfNonExistingUpdateInDocTrueButQueryParamsFalse() throws Exception {
Request request = new Request("http:
HttpPut httpPut = new HttpPut(request.getUri());
StringEntity entity = new StringEntity(update_test_create_if_non_existient_doc, ContentType.create("application/json"));
httpPut.setEntity(entity);
assertThat(doRest(httpPut), is(update_test_create_if_non_existing_response));
assertThat(getLog(), not(containsString("CREATE IF NON EXISTENT IS TRUE")));
}
String getLog() throws IOException {
Request request = new Request("http:
HttpDelete delete = new HttpDelete(request.getUri());
doRest(delete);
return doRest(delete);
}
String remove_test_uri = "/document/v1/namespace/testdocument/docid/c";
String remove_test_response = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Test
public void testbasicRemove() throws Exception {
Request request = new Request("http:
HttpDelete delete = new HttpDelete(request.getUri());
assertThat(doRest(delete), is(remove_test_response));
}
String get_test_uri = "/document/v1/namespace/document-type/docid/c";
String get_response_part1 = "\"pathId\":\"/document/v1/namespace/document-type/docid/c\"";
String get_response_part2 = "\"id\":\"id:namespace:document-type::c\"";
@Test
public void testbasicGet() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
final String rest = doRest(get);
assertThat(rest, containsString(get_response_part1));
assertThat(rest, containsString(get_response_part2));
}
String id_test_uri = "/document/v1/namespace/document-type/docid/f/u/n/n/y/!";
String id_response_part1 = "\"pathId\":\"/document/v1/namespace/document-type/docid/f/u/n/n/y/!\"";
String id_response_part2 = "\"id\":\"id:namespace:document-type::f/u/n/n/y/!\"";
@Test
public void testSlashesInId() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
final String rest = doRest(get);
assertThat(rest, containsString(id_response_part1));
assertThat(rest, containsString(id_response_part2));
}
String get_enc_id = "!\":æøå@/& Q1+";
String get_enc_id_encoded_v1 = "!%22%3A%C3%A6%C3%B8%C3%A5%40%2F%26%20Q1%2B";
String get_enc_id_encoded_v2 = "%21%22%3A%C3%A6%C3%B8%C3%A5%40%2F%26+Q1%2B";
String get_enc_test_uri_v1 = "/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v1;
String get_enc_test_uri_v2 = "/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v2;
String get_enc_response_part1 = "\"pathId\":\"/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v1 + "\"";
String get_enc_response_part1_v2 = "\"pathId\":\"/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v2 + "\"";
String get_enc_response_part2 = "\"id\":\"id:namespace:document-type::" + get_enc_id.replace("\"", "\\\"") + "\"";
@Test
public void testbasicEncodingV1() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
final String rest = doRest(get);
assertThat(rest, containsString(get_enc_response_part1));
assertThat(rest, containsString(get_enc_response_part2));
}
@Test
public void testbasicEncodingV2() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString(get_enc_response_part1_v2));
assertThat(rest, containsString(get_enc_response_part2));
}
@Test
public void get_fieldset_parameter_is_propagated() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("\"fieldset\":\"foo,baz\""));
}
String visit_test_uri = "/document/v1/namespace/document-type/docid/?continuation=abc";
String visit_response_part1 = "\"documents\":[List of json docs, cont token abc, doc selection: '']";
String visit_response_part2 = "\"continuation\":\"token\"";
String visit_response_part3 = "\"pathId\":\"/document/v1/namespace/document-type/docid/\"";
@Test
public void testbasicVisit() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString(visit_response_part1));
assertThat(rest, containsString(visit_response_part2));
assertThat(rest, containsString(visit_response_part3));
}
String visit_test_uri_selection_rewrite = "/document/v1/namespace/document-type/group/abc?continuation=abc";
String visit_test_response_selection_rewrite = "doc selection: 'id.group=='abc''";
@Test
private static String encoded(String original) {
try {
return URLEncoder.encode(original, StandardCharsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e);
}
}
private String performV1RestCall(String pathSuffix) {
try {
Request request = new Request(String.format("http:
getFirstListenPort(), pathSuffix));
HttpGet get = new HttpGet(request.getUri());
return doRest(get);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private void assertResultingDocumentSelection(String suffix, String expected) {
String output = performV1RestCall(suffix);
assertThat(output, containsString(String.format("doc selection: '%s'", expected)));
}
private void assertGroupDocumentSelection(String group, String expected) {
assertResultingDocumentSelection("group/" + encoded(group), expected);
}
@Test
public void group_strings_are_escaped() {
assertGroupDocumentSelection("'", "id.group=='\\''");
assertGroupDocumentSelection("hello 'world'", "id.group=='hello \\'world\\''");
assertGroupDocumentSelection("' goodbye moon", "id.group=='\\' goodbye moon'");
}
private void assertNumericIdFailsParsing(String id) {
String output = performV1RestCall(String.format("number/%s", encoded(id)));
assertThat(output, containsString("Failed to parse numeric part of selection URI"));
}
@Test
public void invalid_numeric_id_returns_error() {
assertNumericIdFailsParsing("123a");
assertNumericIdFailsParsing("a123");
assertNumericIdFailsParsing("0x1234");
assertNumericIdFailsParsing("\u0000");
}
@Test
public void non_text_group_string_character_returns_error() {
String output = performV1RestCall(String.format("group/%s", encoded("\u001f")));
assertThat(output, containsString("Failed to parse group part of selection URI; contains invalid text code point U001F"));
}
@Test
public void can_specify_numeric_id_without_explicit_selection() {
assertResultingDocumentSelection("number/1234", "id.user==1234");
}
@Test
public void can_specify_group_id_without_explicit_selection() {
assertResultingDocumentSelection("group/foo", "id.group=='foo'");
}
@Test
public void can_specify_both_numeric_id_and_explicit_selection() {
assertResultingDocumentSelection(String.format("number/1234?selection=%s", encoded("1 != 2")),
"id.user==1234 and (1 != 2)");
}
@Test
public void can_specify_both_group_id_and_explicit_selection() {
assertResultingDocumentSelection(String.format("group/bar?selection=%s", encoded("3 != 4")),
"id.group=='bar' and (3 != 4)");
}
private void assertDocumentSelectionFailsParsing(String expression) {
String output = performV1RestCall(String.format("number/1234?selection=%s", encoded(expression)));
assertThat(output, containsString("Failed to parse expression given in 'selection' parameter. Must be a complete and valid sub-expression."));
}
@Test
public void explicit_selection_sub_expression_is_validated_for_completeness() {
assertDocumentSelectionFailsParsing("1 +");
assertDocumentSelectionFailsParsing(") or true");
assertDocumentSelectionFailsParsing("((1 + 2)");
assertDocumentSelectionFailsParsing("true) or (true");
}
@Test
public void wanted_document_count_returned_parameter_is_propagated() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("min docs returned: 321"));
}
@Test
public void invalid_wanted_document_count_parameter_returns_error_response() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("Invalid 'wantedDocumentCount' value. Expected positive integer"));
}
@Test
public void negative_document_count_parameter_returns_error_response() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("Invalid 'wantedDocumentCount' value. Expected positive integer"));
}
@Test
public void visit_fieldset_parameter_is_propagated() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("field set: 'foo,baz'"));
}
@Test
public void visit_concurrency_parameter_is_propagated() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("concurrency: 42"));
}
@Test
public void invalid_visit_concurrency_parameter_returns_error_response() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("Invalid 'concurrency' value. Expected positive integer"));
}
private String doRest(HttpRequestBase request) throws IOException {
HttpClient client = HttpClientBuilder.create().build();
HttpResponse response = client.execute(request);
assertThat(response.getEntity().getContentType().getValue().toString(), startsWith("application/json;"));
HttpEntity entity = response.getEntity();
return EntityUtils.toString(entity);
}
private String getFirstListenPort() {
JettyHttpServer serverProvider =
(JettyHttpServer) Container.get().getServerProviderRegistry().allComponents().get(0);
return Integer.toString(serverProvider.getListenPort());
}
} | class RestApiTest {
Application application;
@Before
public void setup() throws Exception {
application = Application.fromApplicationPackage(Paths.get("src/test/rest-api-application"), Networking.enable);
}
@After
public void tearDown() throws Exception {
application.close();
}
String post_test_uri = "/document/v1/namespace/testdocument/docid/c";
String post_test_doc = "{\n" +
"\"foo\" : \"bar\"," +
"\"fields\": {\n" +
"\"title\": \"This is the title\",\n" +
"\"body\": \"This is the body\"" +
"}" +
"}";
String post_test_response = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Ignore
@Test
public void blockingTest() throws Exception {
System.out.println("Running on port " + getFirstListenPort());
Thread.sleep(Integer.MAX_VALUE);
}
@Test
public void testbasicPost() throws Exception {
Request request = new Request("http:
HttpPost httpPost = new HttpPost(request.getUri());
StringEntity entity = new StringEntity(post_test_doc, ContentType.create("application/json"));
httpPost.setEntity(entity);
String x = doRest(httpPost);
assertThat(x, is(post_test_response));
}
String post_test_uri_cond = "/document/v1/namespace/testdocument/docid/c?condition=foo";
String post_test_doc_cond = "{\n" +
"\"foo\" : \"bar\"," +
"\"fields\": {\n" +
"\"title\": \"This is the title\",\n" +
"\"body\": \"This is the body\"" +
"}" +
"}";
String post_test_response_cond = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Test
public void testConditionalPost() throws Exception {
Request request = new Request("http:
HttpPost httpPost = new HttpPost(request.getUri());
StringEntity entity = new StringEntity(post_test_doc_cond, ContentType.create("application/json"));
httpPost.setEntity(entity);
String x = doRest(httpPost);
assertThat(x, is(post_test_response_cond));
}
@Test
public void testEmptyPost() throws Exception {
Request request = new Request("http:
HttpPost httpPost = new HttpPost(request.getUri());
StringEntity entity = new StringEntity("", ContentType.create("application/json"));
httpPost.setEntity(entity);
String x = doRest(httpPost);
assertThat(x, containsString("Could not read document, no document?"));
}
String update_test_uri = "/document/v1/namespace/testdocument/docid/c";
String update_test_doc = "{\n" +
"\t\"fields\": {\n" +
"\"title\": {\n" +
"\"assign\": \"Oh lala\"\n" +
"}\n" +
"}\n" +
"}\n";
String update_test_response = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Test
public void testbasicUpdate() throws Exception {
Request request = new Request("http:
HttpPut httpPut = new HttpPut(request.getUri());
StringEntity entity = new StringEntity(update_test_doc, ContentType.create("application/json"));
httpPut.setEntity(entity);
assertThat(doRest(httpPut), is(update_test_response));
assertThat(getLog(), not(containsString("CREATE IF NON EXISTING IS TRUE")));
}
@Test
public void testbasicUpdateCreateTrue() throws Exception {
Request request = new Request("http:
HttpPut httpPut = new HttpPut(request.getUri());
StringEntity entity = new StringEntity(update_test_doc, ContentType.create("application/json"));
httpPut.setEntity(entity);
assertThat(doRest(httpPut), is(update_test_response));
assertThat(getLog(), containsString("CREATE IF NON EXISTENT IS TRUE"));
}
String update_test_create_if_non_existient_uri = "/document/v1/namespace/testdocument/docid/c";
String update_test_create_if_non_existient_doc = "{\n" +
"\"create\":true," +
"\t\"fields\": {\n" +
"\"title\": {\n" +
"\"assign\": \"Oh lala\"\n" +
"}\n" +
"}\n" +
"}\n";
String update_test_create_if_non_existing_response = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Test
public void testCreateIfNonExistingUpdateInDocTrue() throws Exception {
Request request = new Request("http:
HttpPut httpPut = new HttpPut(request.getUri());
StringEntity entity = new StringEntity(update_test_create_if_non_existient_doc, ContentType.create("application/json"));
httpPut.setEntity(entity);
assertThat(doRest(httpPut), is(update_test_create_if_non_existing_response));
assertThat(getLog(), containsString("CREATE IF NON EXISTENT IS TRUE"));
}
@Test
public void testCreateIfNonExistingUpdateInDocTrueButQueryParamsFalse() throws Exception {
Request request = new Request("http:
HttpPut httpPut = new HttpPut(request.getUri());
StringEntity entity = new StringEntity(update_test_create_if_non_existient_doc, ContentType.create("application/json"));
httpPut.setEntity(entity);
assertThat(doRest(httpPut), is(update_test_create_if_non_existing_response));
assertThat(getLog(), not(containsString("CREATE IF NON EXISTENT IS TRUE")));
}
String getLog() throws IOException {
Request request = new Request("http:
HttpDelete delete = new HttpDelete(request.getUri());
doRest(delete);
return doRest(delete);
}
String remove_test_uri = "/document/v1/namespace/testdocument/docid/c";
String remove_test_response = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Test
public void testbasicRemove() throws Exception {
Request request = new Request("http:
HttpDelete delete = new HttpDelete(request.getUri());
assertThat(doRest(delete), is(remove_test_response));
}
String get_test_uri = "/document/v1/namespace/document-type/docid/c";
String get_response_part1 = "\"pathId\":\"/document/v1/namespace/document-type/docid/c\"";
String get_response_part2 = "\"id\":\"id:namespace:document-type::c\"";
@Test
public void testbasicGet() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
final String rest = doRest(get);
assertThat(rest, containsString(get_response_part1));
assertThat(rest, containsString(get_response_part2));
}
String id_test_uri = "/document/v1/namespace/document-type/docid/f/u/n/n/y/!";
String id_response_part1 = "\"pathId\":\"/document/v1/namespace/document-type/docid/f/u/n/n/y/!\"";
String id_response_part2 = "\"id\":\"id:namespace:document-type::f/u/n/n/y/!\"";
@Test
public void testSlashesInId() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
final String rest = doRest(get);
assertThat(rest, containsString(id_response_part1));
assertThat(rest, containsString(id_response_part2));
}
String get_enc_id = "!\":æøå@/& Q1+";
String get_enc_id_encoded_v1 = "!%22%3A%C3%A6%C3%B8%C3%A5%40%2F%26%20Q1%2B";
String get_enc_id_encoded_v2 = "%21%22%3A%C3%A6%C3%B8%C3%A5%40%2F%26+Q1%2B";
String get_enc_test_uri_v1 = "/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v1;
String get_enc_test_uri_v2 = "/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v2;
String get_enc_response_part1 = "\"pathId\":\"/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v1 + "\"";
String get_enc_response_part1_v2 = "\"pathId\":\"/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v2 + "\"";
String get_enc_response_part2 = "\"id\":\"id:namespace:document-type::" + get_enc_id.replace("\"", "\\\"") + "\"";
@Test
public void testbasicEncodingV1() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
final String rest = doRest(get);
assertThat(rest, containsString(get_enc_response_part1));
assertThat(rest, containsString(get_enc_response_part2));
}
@Test
public void testbasicEncodingV2() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString(get_enc_response_part1_v2));
assertThat(rest, containsString(get_enc_response_part2));
}
@Test
public void get_fieldset_parameter_is_propagated() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("\"fieldset\":\"foo,baz\""));
}
String visit_test_uri = "/document/v1/namespace/document-type/docid/?continuation=abc";
String visit_response_part1 = "\"documents\":[List of json docs, cont token abc, doc selection: '']";
String visit_response_part2 = "\"continuation\":\"token\"";
String visit_response_part3 = "\"pathId\":\"/document/v1/namespace/document-type/docid/\"";
@Test
public void testbasicVisit() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString(visit_response_part1));
assertThat(rest, containsString(visit_response_part2));
assertThat(rest, containsString(visit_response_part3));
}
private static String encoded(String original) {
try {
return URLEncoder.encode(original, StandardCharsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e);
}
}
private String performV1RestCall(String pathSuffix) {
try {
Request request = new Request(String.format("http:
getFirstListenPort(), pathSuffix));
HttpGet get = new HttpGet(request.getUri());
return doRest(get);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private void assertResultingDocumentSelection(String suffix, String expected) {
String output = performV1RestCall(suffix);
assertThat(output, containsString(String.format("doc selection: '%s'", expected)));
}
@Test
private void assertGroupDocumentSelection(String group, String expected) {
assertResultingDocumentSelection("group/" + encoded(group), expected);
}
@Test
public void group_strings_are_escaped() {
assertGroupDocumentSelection("'", "id.group=='\\''");
assertGroupDocumentSelection("hello 'world'", "id.group=='hello \\'world\\''");
assertGroupDocumentSelection("' goodbye moon", "id.group=='\\' goodbye moon'");
}
private void assertNumericIdFailsParsing(String id) {
String output = performV1RestCall(String.format("number/%s", encoded(id)));
assertThat(output, containsString("Failed to parse numeric part of selection URI"));
}
@Test
public void invalid_numeric_id_returns_error() {
assertNumericIdFailsParsing("123a");
assertNumericIdFailsParsing("a123");
assertNumericIdFailsParsing("0x1234");
assertNumericIdFailsParsing("\u0000");
}
@Test
public void non_text_group_string_character_returns_error() {
String output = performV1RestCall(String.format("group/%s", encoded("\u001f")));
assertThat(output, containsString("Failed to parse group part of selection URI; contains invalid text code point U001F"));
}
@Test
public void can_specify_numeric_id_without_explicit_selection() {
assertResultingDocumentSelection("number/1234", "id.user==1234");
}
@Test
public void can_specify_group_id_without_explicit_selection() {
assertResultingDocumentSelection("group/foo", "id.group=='foo'");
}
@Test
public void can_specify_both_numeric_id_and_explicit_selection() {
assertResultingDocumentSelection(String.format("number/1234?selection=%s", encoded("1 != 2")),
"id.user==1234 and (1 != 2)");
}
@Test
public void can_specify_both_group_id_and_explicit_selection() {
assertResultingDocumentSelection(String.format("group/bar?selection=%s", encoded("3 != 4")),
"id.group=='bar' and (3 != 4)");
}
private void assertDocumentSelectionFailsParsing(String expression) {
String output = performV1RestCall(String.format("number/1234?selection=%s", encoded(expression)));
assertThat(output, containsString("Failed to parse expression given in 'selection' parameter. Must be a complete and valid sub-expression."));
}
@Test
public void explicit_selection_sub_expression_is_validated_for_completeness() {
assertDocumentSelectionFailsParsing("1 +");
assertDocumentSelectionFailsParsing(") or true");
assertDocumentSelectionFailsParsing("((1 + 2)");
assertDocumentSelectionFailsParsing("true) or (true");
}
@Test
public void wanted_document_count_returned_parameter_is_propagated() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("min docs returned: 321"));
}
@Test
public void invalid_wanted_document_count_parameter_returns_error_response() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("Invalid 'wantedDocumentCount' value. Expected positive integer"));
}
@Test
public void negative_document_count_parameter_returns_error_response() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("Invalid 'wantedDocumentCount' value. Expected positive integer"));
}
@Test
public void visit_fieldset_parameter_is_propagated() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("field set: 'foo,baz'"));
}
@Test
public void visit_concurrency_parameter_is_propagated() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("concurrency: 42"));
}
@Test
public void invalid_visit_concurrency_parameter_returns_error_response() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("Invalid 'concurrency' value. Expected positive integer"));
}
private String doRest(HttpRequestBase request) throws IOException {
HttpClient client = HttpClientBuilder.create().build();
HttpResponse response = client.execute(request);
assertThat(response.getEntity().getContentType().getValue().toString(), startsWith("application/json;"));
HttpEntity entity = response.getEntity();
return EntityUtils.toString(entity);
}
private String getFirstListenPort() {
JettyHttpServer serverProvider =
(JettyHttpServer) Container.get().getServerProviderRegistry().allComponents().get(0);
return Integer.toString(serverProvider.getListenPort());
}
} |
Why was there 1 `=` in `documentSelection` before, but 2 `=` now? | private HttpResponse handleVisit(RestUri restUri, HttpRequest request) throws RestApiException {
String documentSelection;
OperationHandler.VisitOptions options;
try {
documentSelection = documentSelectionFromRequest(restUri, request);
options = visitOptionsFromRequest(request);
} catch (BadRequestParameterException e) {
return createInvalidParameterResponse(e.getParameter(), e.getMessage());
}
final OperationHandler.VisitResult visit = operationHandler.visit(restUri, documentSelection, options);
final ObjectNode resultNode = mapper.createObjectNode();
visit.token.ifPresent(t -> resultNode.put(CONTINUATION, t));
resultNode.putArray(DOCUMENTS).addPOJO(visit.documentsAsJsonList);
resultNode.put(PATH_NAME, restUri.getRawPath());
HttpResponse httpResponse = new HttpResponse(200) {
@Override
public String getContentType() { return APPLICATION_JSON; }
@Override
public void render(OutputStream outputStream) throws IOException {
try {
outputStream.write(resultNode.toString().getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
};
return httpResponse;
} | documentSelection = documentSelectionFromRequest(restUri, request); | private HttpResponse handleVisit(RestUri restUri, HttpRequest request) throws RestApiException {
String documentSelection;
OperationHandler.VisitOptions options;
try {
documentSelection = documentSelectionFromRequest(restUri, request);
options = visitOptionsFromRequest(request);
} catch (BadRequestParameterException e) {
return createInvalidParameterResponse(e.getParameter(), e.getMessage());
}
final OperationHandler.VisitResult visit = operationHandler.visit(restUri, documentSelection, options);
final ObjectNode resultNode = mapper.createObjectNode();
visit.token.ifPresent(t -> resultNode.put(CONTINUATION, t));
resultNode.putArray(DOCUMENTS).addPOJO(visit.documentsAsJsonList);
resultNode.put(PATH_NAME, restUri.getRawPath());
HttpResponse httpResponse = new HttpResponse(200) {
@Override
public String getContentType() { return APPLICATION_JSON; }
@Override
public void render(OutputStream outputStream) throws IOException {
try {
outputStream.write(resultNode.toString().getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
};
return httpResponse;
} | class BadRequestParameterException extends IllegalArgumentException {
private String parameter;
BadRequestParameterException(String parameter, String message) {
super(message);
this.parameter = parameter;
}
String getParameter() {
return parameter;
}
} | class BadRequestParameterException extends IllegalArgumentException {
private String parameter;
BadRequestParameterException(String parameter, String message) {
super(message);
this.parameter = parameter;
}
String getParameter() {
return parameter;
}
} |
Are you referring to ```java if (documentSelection.isEmpty()) { documentSelection = locationSubExpression; } else { documentSelection = String.format("%s and (%s)", locationSubExpression, documentSelection); } ``` in `documentSelectionFromRequest`? If so, that's because the old code didn't allow specifying both a selection and a number/group at the same time. Now it will generate a conjunctive expression iff both are present. The data flow legibility in this function could probably be improved. | private HttpResponse handleVisit(RestUri restUri, HttpRequest request) throws RestApiException {
String documentSelection;
OperationHandler.VisitOptions options;
try {
documentSelection = documentSelectionFromRequest(restUri, request);
options = visitOptionsFromRequest(request);
} catch (BadRequestParameterException e) {
return createInvalidParameterResponse(e.getParameter(), e.getMessage());
}
final OperationHandler.VisitResult visit = operationHandler.visit(restUri, documentSelection, options);
final ObjectNode resultNode = mapper.createObjectNode();
visit.token.ifPresent(t -> resultNode.put(CONTINUATION, t));
resultNode.putArray(DOCUMENTS).addPOJO(visit.documentsAsJsonList);
resultNode.put(PATH_NAME, restUri.getRawPath());
HttpResponse httpResponse = new HttpResponse(200) {
@Override
public String getContentType() { return APPLICATION_JSON; }
@Override
public void render(OutputStream outputStream) throws IOException {
try {
outputStream.write(resultNode.toString().getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
};
return httpResponse;
} | documentSelection = documentSelectionFromRequest(restUri, request); | private HttpResponse handleVisit(RestUri restUri, HttpRequest request) throws RestApiException {
String documentSelection;
OperationHandler.VisitOptions options;
try {
documentSelection = documentSelectionFromRequest(restUri, request);
options = visitOptionsFromRequest(request);
} catch (BadRequestParameterException e) {
return createInvalidParameterResponse(e.getParameter(), e.getMessage());
}
final OperationHandler.VisitResult visit = operationHandler.visit(restUri, documentSelection, options);
final ObjectNode resultNode = mapper.createObjectNode();
visit.token.ifPresent(t -> resultNode.put(CONTINUATION, t));
resultNode.putArray(DOCUMENTS).addPOJO(visit.documentsAsJsonList);
resultNode.put(PATH_NAME, restUri.getRawPath());
HttpResponse httpResponse = new HttpResponse(200) {
@Override
public String getContentType() { return APPLICATION_JSON; }
@Override
public void render(OutputStream outputStream) throws IOException {
try {
outputStream.write(resultNode.toString().getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
};
return httpResponse;
} | class BadRequestParameterException extends IllegalArgumentException {
private String parameter;
BadRequestParameterException(String parameter, String message) {
super(message);
this.parameter = parameter;
}
String getParameter() {
return parameter;
}
} | class BadRequestParameterException extends IllegalArgumentException {
private String parameter;
BadRequestParameterException(String parameter, String message) {
super(message);
this.parameter = parameter;
}
String getParameter() {
return parameter;
}
} |
Fixed | public void testUseExpressionOnVisit() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString(visit_test_response_selection_rewrite));
} | Request request = new Request("http: | public void testUseExpressionOnVisit() throws Exception {
assertResultingDocumentSelection("group/abc?continuation=xyz", "id.group=='abc'");
} | class RestApiTest {
Application application;
@Before
public void setup() throws Exception {
application = Application.fromApplicationPackage(Paths.get("src/test/rest-api-application"), Networking.enable);
}
@After
public void tearDown() throws Exception {
application.close();
}
String post_test_uri = "/document/v1/namespace/testdocument/docid/c";
String post_test_doc = "{\n" +
"\"foo\" : \"bar\"," +
"\"fields\": {\n" +
"\"title\": \"This is the title\",\n" +
"\"body\": \"This is the body\"" +
"}" +
"}";
String post_test_response = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Ignore
@Test
public void blockingTest() throws Exception {
System.out.println("Running on port " + getFirstListenPort());
Thread.sleep(Integer.MAX_VALUE);
}
@Test
public void testbasicPost() throws Exception {
Request request = new Request("http:
HttpPost httpPost = new HttpPost(request.getUri());
StringEntity entity = new StringEntity(post_test_doc, ContentType.create("application/json"));
httpPost.setEntity(entity);
String x = doRest(httpPost);
assertThat(x, is(post_test_response));
}
String post_test_uri_cond = "/document/v1/namespace/testdocument/docid/c?condition=foo";
String post_test_doc_cond = "{\n" +
"\"foo\" : \"bar\"," +
"\"fields\": {\n" +
"\"title\": \"This is the title\",\n" +
"\"body\": \"This is the body\"" +
"}" +
"}";
String post_test_response_cond = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Test
public void testConditionalPost() throws Exception {
Request request = new Request("http:
HttpPost httpPost = new HttpPost(request.getUri());
StringEntity entity = new StringEntity(post_test_doc_cond, ContentType.create("application/json"));
httpPost.setEntity(entity);
String x = doRest(httpPost);
assertThat(x, is(post_test_response_cond));
}
@Test
public void testEmptyPost() throws Exception {
Request request = new Request("http:
HttpPost httpPost = new HttpPost(request.getUri());
StringEntity entity = new StringEntity("", ContentType.create("application/json"));
httpPost.setEntity(entity);
String x = doRest(httpPost);
assertThat(x, containsString("Could not read document, no document?"));
}
String update_test_uri = "/document/v1/namespace/testdocument/docid/c";
String update_test_doc = "{\n" +
"\t\"fields\": {\n" +
"\"title\": {\n" +
"\"assign\": \"Oh lala\"\n" +
"}\n" +
"}\n" +
"}\n";
String update_test_response = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Test
public void testbasicUpdate() throws Exception {
Request request = new Request("http:
HttpPut httpPut = new HttpPut(request.getUri());
StringEntity entity = new StringEntity(update_test_doc, ContentType.create("application/json"));
httpPut.setEntity(entity);
assertThat(doRest(httpPut), is(update_test_response));
assertThat(getLog(), not(containsString("CREATE IF NON EXISTING IS TRUE")));
}
@Test
public void testbasicUpdateCreateTrue() throws Exception {
Request request = new Request("http:
HttpPut httpPut = new HttpPut(request.getUri());
StringEntity entity = new StringEntity(update_test_doc, ContentType.create("application/json"));
httpPut.setEntity(entity);
assertThat(doRest(httpPut), is(update_test_response));
assertThat(getLog(), containsString("CREATE IF NON EXISTENT IS TRUE"));
}
String update_test_create_if_non_existient_uri = "/document/v1/namespace/testdocument/docid/c";
String update_test_create_if_non_existient_doc = "{\n" +
"\"create\":true," +
"\t\"fields\": {\n" +
"\"title\": {\n" +
"\"assign\": \"Oh lala\"\n" +
"}\n" +
"}\n" +
"}\n";
String update_test_create_if_non_existing_response = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Test
public void testCreateIfNonExistingUpdateInDocTrue() throws Exception {
Request request = new Request("http:
HttpPut httpPut = new HttpPut(request.getUri());
StringEntity entity = new StringEntity(update_test_create_if_non_existient_doc, ContentType.create("application/json"));
httpPut.setEntity(entity);
assertThat(doRest(httpPut), is(update_test_create_if_non_existing_response));
assertThat(getLog(), containsString("CREATE IF NON EXISTENT IS TRUE"));
}
@Test
public void testCreateIfNonExistingUpdateInDocTrueButQueryParamsFalse() throws Exception {
Request request = new Request("http:
HttpPut httpPut = new HttpPut(request.getUri());
StringEntity entity = new StringEntity(update_test_create_if_non_existient_doc, ContentType.create("application/json"));
httpPut.setEntity(entity);
assertThat(doRest(httpPut), is(update_test_create_if_non_existing_response));
assertThat(getLog(), not(containsString("CREATE IF NON EXISTENT IS TRUE")));
}
String getLog() throws IOException {
Request request = new Request("http:
HttpDelete delete = new HttpDelete(request.getUri());
doRest(delete);
return doRest(delete);
}
String remove_test_uri = "/document/v1/namespace/testdocument/docid/c";
String remove_test_response = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Test
public void testbasicRemove() throws Exception {
Request request = new Request("http:
HttpDelete delete = new HttpDelete(request.getUri());
assertThat(doRest(delete), is(remove_test_response));
}
String get_test_uri = "/document/v1/namespace/document-type/docid/c";
String get_response_part1 = "\"pathId\":\"/document/v1/namespace/document-type/docid/c\"";
String get_response_part2 = "\"id\":\"id:namespace:document-type::c\"";
@Test
public void testbasicGet() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
final String rest = doRest(get);
assertThat(rest, containsString(get_response_part1));
assertThat(rest, containsString(get_response_part2));
}
String id_test_uri = "/document/v1/namespace/document-type/docid/f/u/n/n/y/!";
String id_response_part1 = "\"pathId\":\"/document/v1/namespace/document-type/docid/f/u/n/n/y/!\"";
String id_response_part2 = "\"id\":\"id:namespace:document-type::f/u/n/n/y/!\"";
@Test
public void testSlashesInId() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
final String rest = doRest(get);
assertThat(rest, containsString(id_response_part1));
assertThat(rest, containsString(id_response_part2));
}
String get_enc_id = "!\":æøå@/& Q1+";
String get_enc_id_encoded_v1 = "!%22%3A%C3%A6%C3%B8%C3%A5%40%2F%26%20Q1%2B";
String get_enc_id_encoded_v2 = "%21%22%3A%C3%A6%C3%B8%C3%A5%40%2F%26+Q1%2B";
String get_enc_test_uri_v1 = "/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v1;
String get_enc_test_uri_v2 = "/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v2;
String get_enc_response_part1 = "\"pathId\":\"/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v1 + "\"";
String get_enc_response_part1_v2 = "\"pathId\":\"/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v2 + "\"";
String get_enc_response_part2 = "\"id\":\"id:namespace:document-type::" + get_enc_id.replace("\"", "\\\"") + "\"";
@Test
public void testbasicEncodingV1() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
final String rest = doRest(get);
assertThat(rest, containsString(get_enc_response_part1));
assertThat(rest, containsString(get_enc_response_part2));
}
@Test
public void testbasicEncodingV2() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString(get_enc_response_part1_v2));
assertThat(rest, containsString(get_enc_response_part2));
}
@Test
public void get_fieldset_parameter_is_propagated() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("\"fieldset\":\"foo,baz\""));
}
String visit_test_uri = "/document/v1/namespace/document-type/docid/?continuation=abc";
String visit_response_part1 = "\"documents\":[List of json docs, cont token abc, doc selection: '']";
String visit_response_part2 = "\"continuation\":\"token\"";
String visit_response_part3 = "\"pathId\":\"/document/v1/namespace/document-type/docid/\"";
@Test
public void testbasicVisit() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString(visit_response_part1));
assertThat(rest, containsString(visit_response_part2));
assertThat(rest, containsString(visit_response_part3));
}
String visit_test_uri_selection_rewrite = "/document/v1/namespace/document-type/group/abc?continuation=abc";
String visit_test_response_selection_rewrite = "doc selection: 'id.group=='abc''";
@Test
private static String encoded(String original) {
try {
return URLEncoder.encode(original, StandardCharsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e);
}
}
private String performV1RestCall(String pathSuffix) {
try {
Request request = new Request(String.format("http:
getFirstListenPort(), pathSuffix));
HttpGet get = new HttpGet(request.getUri());
return doRest(get);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private void assertResultingDocumentSelection(String suffix, String expected) {
String output = performV1RestCall(suffix);
assertThat(output, containsString(String.format("doc selection: '%s'", expected)));
}
private void assertGroupDocumentSelection(String group, String expected) {
assertResultingDocumentSelection("group/" + encoded(group), expected);
}
@Test
public void group_strings_are_escaped() {
assertGroupDocumentSelection("'", "id.group=='\\''");
assertGroupDocumentSelection("hello 'world'", "id.group=='hello \\'world\\''");
assertGroupDocumentSelection("' goodbye moon", "id.group=='\\' goodbye moon'");
}
private void assertNumericIdFailsParsing(String id) {
String output = performV1RestCall(String.format("number/%s", encoded(id)));
assertThat(output, containsString("Failed to parse numeric part of selection URI"));
}
@Test
public void invalid_numeric_id_returns_error() {
assertNumericIdFailsParsing("123a");
assertNumericIdFailsParsing("a123");
assertNumericIdFailsParsing("0x1234");
assertNumericIdFailsParsing("\u0000");
}
@Test
public void non_text_group_string_character_returns_error() {
String output = performV1RestCall(String.format("group/%s", encoded("\u001f")));
assertThat(output, containsString("Failed to parse group part of selection URI; contains invalid text code point U001F"));
}
@Test
public void can_specify_numeric_id_without_explicit_selection() {
assertResultingDocumentSelection("number/1234", "id.user==1234");
}
@Test
public void can_specify_group_id_without_explicit_selection() {
assertResultingDocumentSelection("group/foo", "id.group=='foo'");
}
@Test
public void can_specify_both_numeric_id_and_explicit_selection() {
assertResultingDocumentSelection(String.format("number/1234?selection=%s", encoded("1 != 2")),
"id.user==1234 and (1 != 2)");
}
@Test
public void can_specify_both_group_id_and_explicit_selection() {
assertResultingDocumentSelection(String.format("group/bar?selection=%s", encoded("3 != 4")),
"id.group=='bar' and (3 != 4)");
}
private void assertDocumentSelectionFailsParsing(String expression) {
String output = performV1RestCall(String.format("number/1234?selection=%s", encoded(expression)));
assertThat(output, containsString("Failed to parse expression given in 'selection' parameter. Must be a complete and valid sub-expression."));
}
@Test
public void explicit_selection_sub_expression_is_validated_for_completeness() {
assertDocumentSelectionFailsParsing("1 +");
assertDocumentSelectionFailsParsing(") or true");
assertDocumentSelectionFailsParsing("((1 + 2)");
assertDocumentSelectionFailsParsing("true) or (true");
}
@Test
public void wanted_document_count_returned_parameter_is_propagated() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("min docs returned: 321"));
}
@Test
public void invalid_wanted_document_count_parameter_returns_error_response() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("Invalid 'wantedDocumentCount' value. Expected positive integer"));
}
@Test
public void negative_document_count_parameter_returns_error_response() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("Invalid 'wantedDocumentCount' value. Expected positive integer"));
}
@Test
public void visit_fieldset_parameter_is_propagated() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("field set: 'foo,baz'"));
}
@Test
public void visit_concurrency_parameter_is_propagated() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("concurrency: 42"));
}
@Test
public void invalid_visit_concurrency_parameter_returns_error_response() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("Invalid 'concurrency' value. Expected positive integer"));
}
private String doRest(HttpRequestBase request) throws IOException {
HttpClient client = HttpClientBuilder.create().build();
HttpResponse response = client.execute(request);
assertThat(response.getEntity().getContentType().getValue().toString(), startsWith("application/json;"));
HttpEntity entity = response.getEntity();
return EntityUtils.toString(entity);
}
private String getFirstListenPort() {
JettyHttpServer serverProvider =
(JettyHttpServer) Container.get().getServerProviderRegistry().allComponents().get(0);
return Integer.toString(serverProvider.getListenPort());
}
} | class RestApiTest {
Application application;
@Before
public void setup() throws Exception {
application = Application.fromApplicationPackage(Paths.get("src/test/rest-api-application"), Networking.enable);
}
@After
public void tearDown() throws Exception {
application.close();
}
String post_test_uri = "/document/v1/namespace/testdocument/docid/c";
String post_test_doc = "{\n" +
"\"foo\" : \"bar\"," +
"\"fields\": {\n" +
"\"title\": \"This is the title\",\n" +
"\"body\": \"This is the body\"" +
"}" +
"}";
String post_test_response = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Ignore
@Test
public void blockingTest() throws Exception {
System.out.println("Running on port " + getFirstListenPort());
Thread.sleep(Integer.MAX_VALUE);
}
@Test
public void testbasicPost() throws Exception {
Request request = new Request("http:
HttpPost httpPost = new HttpPost(request.getUri());
StringEntity entity = new StringEntity(post_test_doc, ContentType.create("application/json"));
httpPost.setEntity(entity);
String x = doRest(httpPost);
assertThat(x, is(post_test_response));
}
String post_test_uri_cond = "/document/v1/namespace/testdocument/docid/c?condition=foo";
String post_test_doc_cond = "{\n" +
"\"foo\" : \"bar\"," +
"\"fields\": {\n" +
"\"title\": \"This is the title\",\n" +
"\"body\": \"This is the body\"" +
"}" +
"}";
String post_test_response_cond = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Test
public void testConditionalPost() throws Exception {
Request request = new Request("http:
HttpPost httpPost = new HttpPost(request.getUri());
StringEntity entity = new StringEntity(post_test_doc_cond, ContentType.create("application/json"));
httpPost.setEntity(entity);
String x = doRest(httpPost);
assertThat(x, is(post_test_response_cond));
}
@Test
public void testEmptyPost() throws Exception {
Request request = new Request("http:
HttpPost httpPost = new HttpPost(request.getUri());
StringEntity entity = new StringEntity("", ContentType.create("application/json"));
httpPost.setEntity(entity);
String x = doRest(httpPost);
assertThat(x, containsString("Could not read document, no document?"));
}
String update_test_uri = "/document/v1/namespace/testdocument/docid/c";
String update_test_doc = "{\n" +
"\t\"fields\": {\n" +
"\"title\": {\n" +
"\"assign\": \"Oh lala\"\n" +
"}\n" +
"}\n" +
"}\n";
String update_test_response = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Test
public void testbasicUpdate() throws Exception {
Request request = new Request("http:
HttpPut httpPut = new HttpPut(request.getUri());
StringEntity entity = new StringEntity(update_test_doc, ContentType.create("application/json"));
httpPut.setEntity(entity);
assertThat(doRest(httpPut), is(update_test_response));
assertThat(getLog(), not(containsString("CREATE IF NON EXISTING IS TRUE")));
}
@Test
public void testbasicUpdateCreateTrue() throws Exception {
Request request = new Request("http:
HttpPut httpPut = new HttpPut(request.getUri());
StringEntity entity = new StringEntity(update_test_doc, ContentType.create("application/json"));
httpPut.setEntity(entity);
assertThat(doRest(httpPut), is(update_test_response));
assertThat(getLog(), containsString("CREATE IF NON EXISTENT IS TRUE"));
}
String update_test_create_if_non_existient_uri = "/document/v1/namespace/testdocument/docid/c";
String update_test_create_if_non_existient_doc = "{\n" +
"\"create\":true," +
"\t\"fields\": {\n" +
"\"title\": {\n" +
"\"assign\": \"Oh lala\"\n" +
"}\n" +
"}\n" +
"}\n";
String update_test_create_if_non_existing_response = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Test
public void testCreateIfNonExistingUpdateInDocTrue() throws Exception {
Request request = new Request("http:
HttpPut httpPut = new HttpPut(request.getUri());
StringEntity entity = new StringEntity(update_test_create_if_non_existient_doc, ContentType.create("application/json"));
httpPut.setEntity(entity);
assertThat(doRest(httpPut), is(update_test_create_if_non_existing_response));
assertThat(getLog(), containsString("CREATE IF NON EXISTENT IS TRUE"));
}
@Test
public void testCreateIfNonExistingUpdateInDocTrueButQueryParamsFalse() throws Exception {
Request request = new Request("http:
HttpPut httpPut = new HttpPut(request.getUri());
StringEntity entity = new StringEntity(update_test_create_if_non_existient_doc, ContentType.create("application/json"));
httpPut.setEntity(entity);
assertThat(doRest(httpPut), is(update_test_create_if_non_existing_response));
assertThat(getLog(), not(containsString("CREATE IF NON EXISTENT IS TRUE")));
}
String getLog() throws IOException {
Request request = new Request("http:
HttpDelete delete = new HttpDelete(request.getUri());
doRest(delete);
return doRest(delete);
}
String remove_test_uri = "/document/v1/namespace/testdocument/docid/c";
String remove_test_response = "{\"id\":\"id:namespace:testdocument::c\"," +
"\"pathId\":\"/document/v1/namespace/testdocument/docid/c\"}";
@Test
public void testbasicRemove() throws Exception {
Request request = new Request("http:
HttpDelete delete = new HttpDelete(request.getUri());
assertThat(doRest(delete), is(remove_test_response));
}
String get_test_uri = "/document/v1/namespace/document-type/docid/c";
String get_response_part1 = "\"pathId\":\"/document/v1/namespace/document-type/docid/c\"";
String get_response_part2 = "\"id\":\"id:namespace:document-type::c\"";
@Test
public void testbasicGet() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
final String rest = doRest(get);
assertThat(rest, containsString(get_response_part1));
assertThat(rest, containsString(get_response_part2));
}
String id_test_uri = "/document/v1/namespace/document-type/docid/f/u/n/n/y/!";
String id_response_part1 = "\"pathId\":\"/document/v1/namespace/document-type/docid/f/u/n/n/y/!\"";
String id_response_part2 = "\"id\":\"id:namespace:document-type::f/u/n/n/y/!\"";
@Test
public void testSlashesInId() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
final String rest = doRest(get);
assertThat(rest, containsString(id_response_part1));
assertThat(rest, containsString(id_response_part2));
}
String get_enc_id = "!\":æøå@/& Q1+";
String get_enc_id_encoded_v1 = "!%22%3A%C3%A6%C3%B8%C3%A5%40%2F%26%20Q1%2B";
String get_enc_id_encoded_v2 = "%21%22%3A%C3%A6%C3%B8%C3%A5%40%2F%26+Q1%2B";
String get_enc_test_uri_v1 = "/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v1;
String get_enc_test_uri_v2 = "/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v2;
String get_enc_response_part1 = "\"pathId\":\"/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v1 + "\"";
String get_enc_response_part1_v2 = "\"pathId\":\"/document/v1/namespace/document-type/docid/" + get_enc_id_encoded_v2 + "\"";
String get_enc_response_part2 = "\"id\":\"id:namespace:document-type::" + get_enc_id.replace("\"", "\\\"") + "\"";
@Test
public void testbasicEncodingV1() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
final String rest = doRest(get);
assertThat(rest, containsString(get_enc_response_part1));
assertThat(rest, containsString(get_enc_response_part2));
}
@Test
public void testbasicEncodingV2() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString(get_enc_response_part1_v2));
assertThat(rest, containsString(get_enc_response_part2));
}
@Test
public void get_fieldset_parameter_is_propagated() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("\"fieldset\":\"foo,baz\""));
}
String visit_test_uri = "/document/v1/namespace/document-type/docid/?continuation=abc";
String visit_response_part1 = "\"documents\":[List of json docs, cont token abc, doc selection: '']";
String visit_response_part2 = "\"continuation\":\"token\"";
String visit_response_part3 = "\"pathId\":\"/document/v1/namespace/document-type/docid/\"";
@Test
public void testbasicVisit() throws Exception {
Request request = new Request("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString(visit_response_part1));
assertThat(rest, containsString(visit_response_part2));
assertThat(rest, containsString(visit_response_part3));
}
private static String encoded(String original) {
try {
return URLEncoder.encode(original, StandardCharsets.UTF_8.name());
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e);
}
}
private String performV1RestCall(String pathSuffix) {
try {
Request request = new Request(String.format("http:
getFirstListenPort(), pathSuffix));
HttpGet get = new HttpGet(request.getUri());
return doRest(get);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
private void assertResultingDocumentSelection(String suffix, String expected) {
String output = performV1RestCall(suffix);
assertThat(output, containsString(String.format("doc selection: '%s'", expected)));
}
@Test
private void assertGroupDocumentSelection(String group, String expected) {
assertResultingDocumentSelection("group/" + encoded(group), expected);
}
@Test
public void group_strings_are_escaped() {
assertGroupDocumentSelection("'", "id.group=='\\''");
assertGroupDocumentSelection("hello 'world'", "id.group=='hello \\'world\\''");
assertGroupDocumentSelection("' goodbye moon", "id.group=='\\' goodbye moon'");
}
private void assertNumericIdFailsParsing(String id) {
String output = performV1RestCall(String.format("number/%s", encoded(id)));
assertThat(output, containsString("Failed to parse numeric part of selection URI"));
}
@Test
public void invalid_numeric_id_returns_error() {
assertNumericIdFailsParsing("123a");
assertNumericIdFailsParsing("a123");
assertNumericIdFailsParsing("0x1234");
assertNumericIdFailsParsing("\u0000");
}
@Test
public void non_text_group_string_character_returns_error() {
String output = performV1RestCall(String.format("group/%s", encoded("\u001f")));
assertThat(output, containsString("Failed to parse group part of selection URI; contains invalid text code point U001F"));
}
@Test
public void can_specify_numeric_id_without_explicit_selection() {
assertResultingDocumentSelection("number/1234", "id.user==1234");
}
@Test
public void can_specify_group_id_without_explicit_selection() {
assertResultingDocumentSelection("group/foo", "id.group=='foo'");
}
@Test
public void can_specify_both_numeric_id_and_explicit_selection() {
assertResultingDocumentSelection(String.format("number/1234?selection=%s", encoded("1 != 2")),
"id.user==1234 and (1 != 2)");
}
@Test
public void can_specify_both_group_id_and_explicit_selection() {
assertResultingDocumentSelection(String.format("group/bar?selection=%s", encoded("3 != 4")),
"id.group=='bar' and (3 != 4)");
}
private void assertDocumentSelectionFailsParsing(String expression) {
String output = performV1RestCall(String.format("number/1234?selection=%s", encoded(expression)));
assertThat(output, containsString("Failed to parse expression given in 'selection' parameter. Must be a complete and valid sub-expression."));
}
@Test
public void explicit_selection_sub_expression_is_validated_for_completeness() {
assertDocumentSelectionFailsParsing("1 +");
assertDocumentSelectionFailsParsing(") or true");
assertDocumentSelectionFailsParsing("((1 + 2)");
assertDocumentSelectionFailsParsing("true) or (true");
}
@Test
public void wanted_document_count_returned_parameter_is_propagated() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("min docs returned: 321"));
}
@Test
public void invalid_wanted_document_count_parameter_returns_error_response() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("Invalid 'wantedDocumentCount' value. Expected positive integer"));
}
@Test
public void negative_document_count_parameter_returns_error_response() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("Invalid 'wantedDocumentCount' value. Expected positive integer"));
}
@Test
public void visit_fieldset_parameter_is_propagated() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("field set: 'foo,baz'"));
}
@Test
public void visit_concurrency_parameter_is_propagated() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("concurrency: 42"));
}
@Test
public void invalid_visit_concurrency_parameter_returns_error_response() throws IOException {
Request request = new Request(String.format("http:
HttpGet get = new HttpGet(request.getUri());
String rest = doRest(get);
assertThat(rest, containsString("Invalid 'concurrency' value. Expected positive integer"));
}
private String doRest(HttpRequestBase request) throws IOException {
HttpClient client = HttpClientBuilder.create().build();
HttpResponse response = client.execute(request);
assertThat(response.getEntity().getContentType().getValue().toString(), startsWith("application/json;"));
HttpEntity entity = response.getEntity();
return EntityUtils.toString(entity);
}
private String getFirstListenPort() {
JettyHttpServer serverProvider =
(JettyHttpServer) Container.get().getServerProviderRegistry().allComponents().get(0);
return Integer.toString(serverProvider.getListenPort());
}
} |
This isn't in sync with what is deployed now, but we should never trigger things that haven't already passed this check at triggering-time. I will fix this when I add specific versions for system and staging tests, based on what will be deployed in production. | private void validateChange(Application application, ZoneId zone, Version version) {
if ( ! application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) {
throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
" as " + application.change() + " is not tested");
}
Deployment existingDeployment = application.deployments().get(zone);
if (zone.environment().isProduction() && existingDeployment != null &&
existingDeployment.version().isAfter(version)) {
throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
" as the requested version " + version + " is older than" +
" the current version " + existingDeployment.version());
}
} | if ( ! application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { | private void validateChange(Application application, ZoneId zone, Version version) {
if ( ! application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) {
throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
" as " + application.change() + " is not tested");
}
Deployment existingDeployment = application.deployments().get(zone);
if (zone.environment().isProduction() && existingDeployment != null &&
existingDeployment.version().isAfter(version)) {
throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
" as the requested version " + version + " is older than" +
" the current version " + existingDeployment.version());
}
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For permanent storage */
private final ControllerDb db;
/** For working memory storage and sharing between controllers */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final RotationRepository rotationRepository;
private final AthenzClientFactory zmsClientFactory;
private final NameService nameService;
private final ConfigServerClient configServer;
private final RoutingGenerator routingGenerator;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
NameService nameService, ConfigServerClient configServer,
ArtifactRepository artifactRepository,
RoutingGenerator routingGenerator, BuildService buildService, Clock clock) {
this.controller = controller;
this.db = db;
this.curator = curator;
this.zmsClientFactory = zmsClientFactory;
this.nameService = nameService;
this.configServer = configServer;
this.routingGenerator = routingGenerator;
this.clock = clock;
this.artifactRepository = artifactRepository;
this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
this.deploymentTrigger = new DeploymentTrigger(controller, curator, buildService, clock);
for (Application application : db.listApplications()) {
lockIfPresent(application.id(), this::store);
}
}
/** Returns the application with the given id, or null if it is not present */
public Optional<Application> get(ApplicationId id) {
return db.getApplication(id);
}
/**
* Returns the application with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Application require(ApplicationId id) {
return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications */
public List<Application> asList() {
return sort(db.listApplications());
}
/** Returns all applications of a tenant */
public List<Application> asList(TenantName tenant) {
return sort(db.listApplications(tenant));
}
/**
* Set the rotations marked as 'global' either 'in' or 'out of' service.
*
* @return The canonical endpoint altered if any
* @throws IOException if rotation status cannot be updated
*/
public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
List<String> rotations = new ArrayList<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
if (endpoint.isPresent()) {
configServer.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
rotations.add(endpoint.get());
}
return rotations;
}
/**
* Get the endpoint status for the global endpoint of this application
*
* @return Map between the endpoint and the rotation status
* @throws IOException if global rotation status cannot be determined
*/
public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
Map<String, EndpointStatus> result = new HashMap<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
if (endpoint.isPresent()) {
EndpointStatus status = configServer.getGlobalRotationStatus(deploymentId, endpoint.get());
result.put(endpoint.get(), status);
}
return result;
}
/**
* Global rotations (plural as we can have aliases) map to exactly one service endpoint.
* This method finds that one service endpoint and strips the URI part that
* the routingGenerator is wrapping around the endpoint.
*
* @param deploymentId The deployment to retrieve global service endpoint for
* @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
*/
Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
try {
URI uri = new URI(endpoint.getEndpoint());
String serviceEndpoint = uri.getHost();
if (serviceEndpoint == null) {
throw new IOException("Unexpected endpoints returned from the Routing Generator");
}
String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", "");
String hostname = endpoint.getHostname();
if (hostname != null) {
if (endpoint.isGlobal()) {
hostToGlobalEndpoint.put(hostname, endpoint);
} else {
hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
}
if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
return Optional.of(hostToCanonicalEndpoint.get(hostname));
}
}
} catch (URISyntaxException use) {
throw new IOException(use);
}
}
return Optional.empty();
}
/**
* Creates a new application for an existing tenant.
*
* @throws IllegalArgumentException if the application already exists
*/
public Application createApplication(ApplicationId id, Optional<NToken> token) {
if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
try (Lock lock = lock(id)) {
if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
Optional<Tenant> tenant = controller.tenants().tenant(id.tenant());
if ( ! tenant.isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
if (get(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
if (get(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
if (id.instance().isDefault() && tenant.get() instanceof AthenzTenant) {
if ( ! token.isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
zmsClient.addApplication(((AthenzTenant) tenant.get()).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
}
LockedApplication application = new LockedApplication(new Application(id), lock);
store(application);
log.info("Created " + application);
return application;
}
}
/** Deploys an application. If the application does not exist it is created. */
public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
DeployOptions options) {
try (Lock lock = lock(applicationId)) {
LockedApplication application = get(applicationId)
.map(app -> new LockedApplication(app, lock))
.orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock));
boolean canDeployDirectly = ! options.screwdriverBuildJob.map(job1 -> job1.screwdriverId).isPresent()
|| zone.environment().isManuallyDeployed();
boolean preferOldestVersion = options.deployCurrentVersion;
Version platformVersion;
ApplicationVersion applicationVersion;
ApplicationPackage applicationPackage;
if (canDeployDirectly) {
platformVersion = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
applicationVersion = ApplicationVersion.unknown;
applicationPackage = applicationPackageFromDeployer.orElseThrow(
() -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
} else {
JobType jobType = JobType.from(controller.system(), zone)
.orElseThrow(() -> new IllegalArgumentException("No job found for zone " + zone));
Optional<JobStatus.JobRun> triggered = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
.flatMap(JobStatus::lastTriggered);
if ( ! triggered.isPresent())
return unexpectedDeployment(applicationId, zone);
platformVersion = preferOldestVersion
? application.oldestDeployedPlatform().orElse(controller.systemVersion())
: triggered.get().version();
applicationVersion = preferOldestVersion
? application.oldestDeployedApplication().orElse(triggered.get().applicationVersion())
: triggered.get().applicationVersion();
applicationPackage = new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), applicationVersion.id()));
}
validate(applicationPackage.deploymentSpec());
if ( ! preferOldestVersion) {
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
application = deleteRemovedDeployments(application);
application = deleteUnreferencedDeploymentJobs(application);
store(application);
}
if ( ! canDeployDirectly) {
validateChange(application, zone, platformVersion);
}
application = withRotation(application, zone);
Set<String> rotationNames = new HashSet<>();
Set<String> cnames = new HashSet<>();
application.rotation().ifPresent(applicationRotation -> {
rotationNames.add(applicationRotation.id().asString());
cnames.add(applicationRotation.dnsName());
cnames.add(applicationRotation.secureDnsName());
});
options = withVersion(platformVersion, options);
ConfigServerClient.PreparedApplication preparedApplication;
DeploymentId deploymentId = new DeploymentId(applicationId, zone);
if (controller.system().equals(SystemName.cd) && deploymentId.applicationId().tenant().value().startsWith(Tenant.userPrefix)) {
preparedApplication = configServer.deploy(deploymentId, options, cnames, rotationNames, applicationPackage.zippedContent());
} else {
preparedApplication = configServer.prepare(deploymentId, options, cnames, rotationNames, applicationPackage.zippedContent());
preparedApplication.activate();
}
application = application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant());
store(application);
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
}
}
/** Makes sure the application has a global rotation, if eligible. */
private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) {
try (RotationLock rotationLock = rotationRepository.lock()) {
Rotation rotation = rotationRepository.getRotation(application, rotationLock);
application = application.with(rotation.id());
store(application);
registerRotationInDns(rotation, application.rotation().get().dnsName());
registerRotationInDns(rotation, application.rotation().get().secureDnsName());
}
}
return application;
}
private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone) {
Log logEntry = new Log();
logEntry.level = "WARNING";
logEntry.time = clock.instant().toEpochMilli();
logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone +
" as a deployment is not currently expected";
PrepareResponse prepareResponse = new PrepareResponse();
prepareResponse.log = Collections.singletonList(logEntry);
prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
}
private LockedApplication deleteRemovedDeployments(LockedApplication application) {
List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
.filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
Optional.of(deployment.zone().region())))
.collect(Collectors.toList());
if (deploymentsToRemove.isEmpty()) return application;
if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
" is deployed in " +
deploymentsToRemove.stream()
.map(deployment -> deployment.zone().region().value())
.collect(Collectors.joining(", ")) +
", but does not include " +
(deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
" in deployment.xml");
LockedApplication applicationWithRemoval = application;
for (Deployment deployment : deploymentsToRemove)
applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
return applicationWithRemoval;
}
private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
for (JobType job : application.deploymentJobs().jobStatus().keySet()) {
Optional<ZoneId> zone = job.zone(controller.system());
if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region))))
continue;
application = application.withoutDeploymentJob(job);
}
return application;
}
private DeployOptions withVersion(Version version, DeployOptions options) {
return new DeployOptions(options.screwdriverBuildJob,
Optional.of(version),
options.ignoreValidationErrors,
options.deployCurrentVersion);
}
/**
 * Ensures a DNS CNAME record maps the given name to the given rotation,
 * creating the record if absent or updating it if it points elsewhere.
 * Failures are logged, not propagated.
 */
private void registerRotationInDns(Rotation rotation, String dnsName) {
    try {
        Optional<Record> existing = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName));
        RecordData rotationName = RecordData.fqdn(rotation.name());
        if ( ! existing.isPresent()) {
            RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
            log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '"
                     + rotation.name() + "'");
        }
        else if ( ! existing.get().data().equals(rotationName)) {
            nameService.updateRecord(existing.get().id(), rotationName);
            log.info("Updated mapping for record ID " + existing.get().id().asString() + ": '" + dnsName
                     + "' -> '" + rotation.name() + "'");
        }
    }
    catch (RuntimeException e) {
        // Best-effort: DNS registration is retried on the next deployment of this application.
        log.log(Level.WARNING, "Failed to register CNAME", e);
    }
}
/** Returns the endpoints of the deployment, or empty (not an empty list) if the endpoint lookup fails */
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
try {
// Snapshot into an immutable list so callers cannot mutate the result.
return Optional.of(ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
.map(RoutingEndpoint::getEndpoint)
.map(URI::create)
.iterator()));
}
catch (RuntimeException e) {
// Endpoint lookup is best-effort here: log the failure and report absence instead of failing the caller.
log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
+ Exceptions.toMessageString(e));
return Optional.empty();
}
}
/**
 * Deletes the given application. All known instances of the application will be deleted,
 * including PR instances.
 *
 * @param applicationId id of the application to delete; all instances with this tenant and application name are removed
 * @param token NToken used to remove the application from its Athenz domain; required for Athenz tenants
 * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
 * @throws NotExistsException if no instances of the application exist
 */
public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) {
// Find every instance (including PR instances) sharing this tenant and application name.
List<ApplicationId> instances = controller.applications().asList(applicationId.tenant())
.stream()
.map(Application::id)
.filter(id -> id.application().equals(applicationId.application()) &&
id.tenant().equals(applicationId.tenant()))
.collect(Collectors.toList());
if (instances.isEmpty()) {
throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
}
instances.forEach(id -> lockOrThrow(id, application -> {
if ( ! application.deployments().isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
Tenant tenant = controller.tenants().tenant(id.tenant()).get();
if (tenant instanceof AthenzTenant && ! token.isPresent())
throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");
if (id.instance().isDefault() && tenant instanceof AthenzTenant) {
// token.get() is safe here: a missing token for an Athenz tenant was rejected above.
zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
.deleteApplication(((AthenzTenant) tenant).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
}
db.deleteApplication(id);
log.info("Deleted " + application);
}));
}
/**
 * Replace any previous version of this application by this instance
 *
 * @param application a locked application to store; requiring the locked type ensures the
 *                    caller holds this application's lock while writing (see {@link #lock})
 */
public void store(LockedApplication application) {
db.store(application);
}
/**
 * Runs the given action on the application with the given id, while holding its lock,
 * if such an application exists. Does nothing otherwise.
 *
 * @param applicationId ID of the application to lock and get.
 * @param action Function which acts on the locked application.
 */
public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        Optional<Application> application = get(applicationId);
        if (application.isPresent())
            action.accept(new LockedApplication(application.get(), lock));
    }
}
/**
 * Runs the given action on the application with the given id, while holding its lock,
 * or throws if no application has the given id.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
 */
public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        LockedApplication locked = new LockedApplication(require(applicationId), lock);
        action.accept(locked);
    }
}
/**
 * Tells config server to schedule a restart of all nodes in this deployment
 *
 * @param hostname If non-empty, restart will only be scheduled for this host
 * @throws IllegalArgumentException if the deployment does not exist
 */
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
    try {
        configServer.restart(deploymentId, hostname);
    }
    catch (NoInstanceException e) {
        // Preserve the underlying exception as cause instead of discarding it.
        throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment", e);
    }
}
/** Deactivate application in the given zone, unconditionally (no expiry check) */
public void deactivate(Application application, ZoneId zone) {
deactivate(application, zone, Optional.empty(), false);
}
/**
 * Deactivate a known deployment of the given application
 *
 * @param requireThatDeploymentHasExpired if true, the deployment is only deactivated when it has expired
 */
public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) {
deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired);
}
/**
 * Deactivates the deployment of the given application in the given zone, storing the result,
 * unless an expiry check is requested and the deployment has not yet expired.
 */
private void deactivate(Application application, ZoneId zone, Optional<Deployment> deployment,
                        boolean requireThatDeploymentHasExpired) {
    if (requireThatDeploymentHasExpired && deployment.isPresent()) {
        boolean expired = DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant());
        if ( ! expired)
            return;
    }
    lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone)));
}
/**
 * Deactivates a locked application without storing it
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
try {
configServer.deactivate(new DeploymentId(application.id(), zone));
}
catch (NoInstanceException ignored) {
// The deployment is already gone on the config server; still remove our own record of it below.
}
return application.withoutDeploymentIn(zone);
}
/** Returns the deployment trigger owned by this controller */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/**
 * Returns a copy of the given id with dashes in the application name replaced by underscores.
 * Used to detect collisions between application names differing only by '-' vs '_'.
 */
private ApplicationId dashToUnderscore(ApplicationId id) {
    // replace() is sufficient for a literal character; replaceAll() compiles a regex on every call.
    return ApplicationId.from(id.tenant().value(),
                              id.application().value().replace("-", "_"),
                              id.instance().value());
}
/** Returns the config server client used by this controller */
public ConfigServerClient configServer() { return configServer; }
/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application need to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
 */
Lock lock(ApplicationId application) {
// The 10 minute timeout bounds how long we wait to acquire the lock.
return curator.lock(application, Duration.ofMinutes(10));
}
/** Verify that each of the production zones listed in the deployment spec exist in this system. */
private void validate(DeploymentSpec deploymentSpec) {
    deploymentSpec.zones().stream()
                  .filter(zone -> zone.environment() == Environment.prod)
                  .filter(zone -> ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
                                                                                  zone.region().orElse(null))))
                  .forEach(zone -> {
                      throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in " +
                                                         "this system!");
                  });
}
/** Returns the rotation repository of this controller. (The previous comment here belonged to a removed method.) */
public RotationRepository rotationRepository() {
return rotationRepository;
}
/** Returns a new list with the given applications sorted by application ID. The input list is not modified. */
private static List<Application> sort(List<Application> applications) {
    List<Application> sorted = new ArrayList<>(applications);
    sorted.sort(Comparator.comparing(Application::id));
    return sorted;
}
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For permanent storage */
private final ControllerDb db;
/** For working memory storage and sharing between controllers */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final RotationRepository rotationRepository;
private final AthenzClientFactory zmsClientFactory;
private final NameService nameService;
private final ConfigServerClient configServer;
private final RoutingGenerator routingGenerator;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
/** Creates an ApplicationController wired with the given collaborators. */
ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
NameService nameService, ConfigServerClient configServer,
ArtifactRepository artifactRepository,
RoutingGenerator routingGenerator, BuildService buildService, Clock clock) {
this.controller = controller;
this.db = db;
this.curator = curator;
this.zmsClientFactory = zmsClientFactory;
this.nameService = nameService;
this.configServer = configServer;
this.routingGenerator = routingGenerator;
this.clock = clock;
this.artifactRepository = artifactRepository;
this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
this.deploymentTrigger = new DeploymentTrigger(controller, curator, buildService, clock);
// NOTE(review): re-stores every application at construction — presumably to rewrite stored
// data into the current serialization format; verify before relying on this.
for (Application application : db.listApplications()) {
lockIfPresent(application.id(), this::store);
}
}
/** Returns the application with the given id, or empty if it is not present (never null) */
public Optional<Application> get(ApplicationId id) {
return db.getApplication(id);
}
/**
* Returns the application with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Application require(ApplicationId id) {
return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications */
public List<Application> asList() {
return sort(db.listApplications());
}
/** Returns all applications of a tenant */
public List<Application> asList(TenantName tenant) {
return sort(db.listApplications(tenant));
}
/**
 * Set the rotations marked as 'global' either 'in' or 'out of' service.
 *
 * @return The canonical endpoint altered if any; empty if the deployment has no global endpoint
 * @throws IOException if rotation status cannot be updated
 */
public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
List<String> rotations = new ArrayList<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
if (endpoint.isPresent()) {
// Only deployments with a global endpoint have a rotation status to set.
configServer.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
rotations.add(endpoint.get());
}
return rotations;
}
/**
 * Get the endpoint status for the global endpoint of this application
 *
 * @return Map between the endpoint and the rotation status; empty if the deployment has no global endpoint
 * @throws IOException if global rotation status cannot be determined
 */
public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
Map<String, EndpointStatus> result = new HashMap<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
if (endpoint.isPresent()) {
// Only deployments with a global endpoint have a rotation status to report.
EndpointStatus status = configServer.getGlobalRotationStatus(deploymentId, endpoint.get());
result.put(endpoint.get(), status);
}
return result;
}
/**
 * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
 * This method finds that one service endpoint and strips the URI part that
 * the routingGenerator is wrapping around the endpoint.
 *
 * @param deploymentId The deployment to retrieve global service endpoint for
 * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
 * @throws IOException if an endpoint URI is malformed or has no host part
 */
Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
// Endpoints keyed by backend hostname: a host seen with both a global and a
// non-global endpoint identifies the canonical service endpoint we are after.
Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
try {
URI uri = new URI(endpoint.getEndpoint());
String serviceEndpoint = uri.getHost();
if (serviceEndpoint == null) {
throw new IOException("Unexpected endpoints returned from the Routing Generator");
}
// NOTE(review): replaceAll treats '.' as a regex wildcard here; presumably intended
// as a literal suffix strip of ".vespa.yahooapis.com" — verify.
String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", "");
String hostname = endpoint.getHostname();
if (hostname != null) {
if (endpoint.isGlobal()) {
hostToGlobalEndpoint.put(hostname, endpoint);
} else {
hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
}
// Return as soon as some host has been seen in both roles.
if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
return Optional.of(hostToCanonicalEndpoint.get(hostname));
}
}
} catch (URISyntaxException use) {
throw new IOException(use);
}
}
return Optional.empty();
}
/**
 * Creates a new application for an existing tenant.
 *
 * @throws IllegalArgumentException if the application already exists, the tenant does not exist,
 *                                  or no NToken is given for an Athenz tenant's default instance
 * @throws UnsupportedOperationException if the instance name is neither 'default' nor a PR number
 */
public Application createApplication(ApplicationId id, Optional<NToken> token) {
if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
try (Lock lock = lock(id)) {
// Validate the application name only when this is the first instance of it for the tenant.
if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
Optional<Tenant> tenant = controller.tenants().tenant(id.tenant());
if ( ! tenant.isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
if (get(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
// Names differing only by '-' vs '_' are treated as the same application.
if (get(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
if (id.instance().isDefault() && tenant.get() instanceof AthenzTenant) {
if ( ! token.isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
// Register the application in the tenant's Athenz domain; token.get() is safe — absence was rejected above.
ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
zmsClient.addApplication(((AthenzTenant) tenant.get()).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
}
LockedApplication application = new LockedApplication(new Application(id), lock);
store(application);
log.info("Created " + application);
return application;
}
}
/** Deploys an application. If the application does not exist it is created. */
public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
DeployOptions options) {
try (Lock lock = lock(applicationId)) {
LockedApplication application = get(applicationId)
.map(app -> new LockedApplication(app, lock))
.orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock));
// Direct deployments are those not made by a screwdriver build job, or made to a manually deployed environment.
boolean canDeployDirectly = ! options.screwdriverBuildJob.map(job1 -> job1.screwdriverId).isPresent()
|| zone.environment().isManuallyDeployed();
boolean preferOldestVersion = options.deployCurrentVersion;
// Decide which platform version, application version and application package to deploy.
Version platformVersion;
ApplicationVersion applicationVersion;
ApplicationPackage applicationPackage;
if (canDeployDirectly) {
// Direct deployments: the deployer supplies the package and, optionally, the platform version.
platformVersion = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
applicationVersion = ApplicationVersion.unknown;
applicationPackage = applicationPackageFromDeployer.orElseThrow(
() -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
} else {
// Job deployments: versions come from the last triggered job run, the package from the artifact repository.
JobType jobType = JobType.from(controller.system(), zone)
.orElseThrow(() -> new IllegalArgumentException("No job found for zone " + zone));
Optional<JobStatus.JobRun> triggered = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
.flatMap(JobStatus::lastTriggered);
if ( ! triggered.isPresent())
return unexpectedDeployment(applicationId, zone);
platformVersion = preferOldestVersion
? application.oldestDeployedPlatform().orElse(controller.systemVersion())
: triggered.get().version();
applicationVersion = preferOldestVersion
? application.oldestDeployedApplication().orElse(triggered.get().applicationVersion())
: triggered.get().applicationVersion();
applicationPackage = new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), applicationVersion.id()));
}
validate(applicationPackage.deploymentSpec());
// Update stored application state from the package, unless deliberately redeploying the current (older) version.
if ( ! preferOldestVersion) {
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
application = deleteRemovedDeployments(application);
application = deleteUnreferencedDeploymentJobs(application);
// Persist spec-derived changes before attempting the actual deployment below.
store(application);
}
if ( ! canDeployDirectly) {
validateChange(application, zone, platformVersion);
}
// Assign a global rotation if eligible, and collect the DNS names to pass to the config server.
application = withRotation(application, zone);
Set<String> rotationNames = new HashSet<>();
Set<String> cnames = new HashSet<>();
application.rotation().ifPresent(applicationRotation -> {
rotationNames.add(applicationRotation.id().asString());
cnames.add(applicationRotation.dnsName());
cnames.add(applicationRotation.secureDnsName());
});
options = withVersion(platformVersion, options);
ConfigServerClient.PreparedApplication preparedApplication;
DeploymentId deploymentId = new DeploymentId(applicationId, zone);
// CD user deployments go through the combined deploy call; everything else prepares then activates.
if (controller.system().equals(SystemName.cd) && deploymentId.applicationId().tenant().value().startsWith(Tenant.userPrefix)) {
preparedApplication = configServer.deploy(deploymentId, options, cnames, rotationNames, applicationPackage.zippedContent());
} else {
preparedApplication = configServer.prepare(deploymentId, options, cnames, rotationNames, applicationPackage.zippedContent());
preparedApplication.activate();
}
// Record the new deployment only after successful activation.
application = application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant());
store(application);
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
}
}
/** Makes sure the application has a global rotation, if eligible (prod deployment with a declared global service id). */
private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) {
try (RotationLock rotationLock = rotationRepository.lock()) {
Rotation rotation = rotationRepository.getRotation(application, rotationLock);
application = application.with(rotation.id());
// Persist the rotation assignment before touching DNS.
store(application);
registerRotationInDns(rotation, application.rotation().get().dnsName());
registerRotationInDns(rotation, application.rotation().get().secureDnsName());
}
}
return application;
}
/** Returns a no-op activation result whose log explains that this deployment was not expected (no triggered job run was found). */
private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone) {
Log logEntry = new Log();
logEntry.level = "WARNING";
logEntry.time = clock.instant().toEpochMilli();
logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone +
" as a deployment is not currently expected";
PrepareResponse prepareResponse = new PrepareResponse();
prepareResponse.log = Collections.singletonList(logEntry);
// Empty change actions and a zero revision signal that nothing was actually deployed.
prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
}
private LockedApplication deleteRemovedDeployments(LockedApplication application) {
List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
.filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
Optional.of(deployment.zone().region())))
.collect(Collectors.toList());
if (deploymentsToRemove.isEmpty()) return application;
if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
" is deployed in " +
deploymentsToRemove.stream()
.map(deployment -> deployment.zone().region().value())
.collect(Collectors.joining(", ")) +
", but does not include " +
(deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
" in deployment.xml");
LockedApplication applicationWithRemoval = application;
for (Deployment deployment : deploymentsToRemove)
applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
return applicationWithRemoval;
}
private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
for (JobType job : application.deploymentJobs().jobStatus().keySet()) {
Optional<ZoneId> zone = job.zone(controller.system());
if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region))))
continue;
application = application.withoutDeploymentJob(job);
}
return application;
}
private DeployOptions withVersion(Version version, DeployOptions options) {
return new DeployOptions(options.screwdriverBuildJob,
Optional.of(version),
options.ignoreValidationErrors,
options.deployCurrentVersion);
}
/** Register a DNS name for rotation */
private void registerRotationInDns(Rotation rotation, String dnsName) {
try {
Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName));
RecordData rotationName = RecordData.fqdn(rotation.name());
if (record.isPresent()) {
if ( ! record.get().data().equals(rotationName)) {
nameService.updateRecord(record.get().id(), rotationName);
log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName
+ "' -> '" + rotation.name() + "'");
}
} else {
RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '"
+ rotation.name() + "'");
}
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to register CNAME", e);
}
}
/** Returns the endpoints of the deployment, or an empty list if the request fails */
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
try {
return Optional.of(ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
.map(RoutingEndpoint::getEndpoint)
.map(URI::create)
.iterator()));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
+ Exceptions.toMessageString(e));
return Optional.empty();
}
}
/**
* Deletes the the given application. All known instances of the applications will be deleted,
* including PR instances.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
* @throws NotExistsException if no instances of the application exist
*/
public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) {
List<ApplicationId> instances = controller.applications().asList(applicationId.tenant())
.stream()
.map(Application::id)
.filter(id -> id.application().equals(applicationId.application()) &&
id.tenant().equals(applicationId.tenant()))
.collect(Collectors.toList());
if (instances.isEmpty()) {
throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
}
instances.forEach(id -> lockOrThrow(id, application -> {
if ( ! application.deployments().isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
Tenant tenant = controller.tenants().tenant(id.tenant()).get();
if (tenant instanceof AthenzTenant && ! token.isPresent())
throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");
if (id.instance().isDefault() && tenant instanceof AthenzTenant) {
zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
.deleteApplication(((AthenzTenant) tenant).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
}
db.deleteApplication(id);
log.info("Deleted " + application);
}));
}
/**
* Replace any previous version of this application by this instance
*
* @param application a locked application to store
*/
public void store(LockedApplication application) {
db.store(application);
}
/**
* Acquire a locked application to modify and store, if there is an application with the given id.
*
* @param applicationId ID of the application to lock and get.
* @param action Function which acts on the locked application.
*/
public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
}
}
/**
* Acquire a locked application to modify and store, or throw an exception if no application has the given id.
*
* @param applicationId ID of the application to lock and require.
* @param action Function which acts on the locked application.
* @throws IllegalArgumentException when application does not exist.
*/
public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
action.accept(new LockedApplication(require(applicationId), lock));
}
}
/**
* Tells config server to schedule a restart of all nodes in this deployment
*
* @param hostname If non-empty, restart will only be scheduled for this host
*/
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
try {
configServer.restart(deploymentId, hostname);
}
catch (NoInstanceException e) {
throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
}
}
/** Deactivate application in the given zone */
public void deactivate(Application application, ZoneId zone) {
deactivate(application, zone, Optional.empty(), false);
}
/** Deactivate a known deployment of the given application */
public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) {
deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired);
}
private void deactivate(Application application, ZoneId zone, Optional<Deployment> deployment,
boolean requireThatDeploymentHasExpired) {
if (requireThatDeploymentHasExpired && deployment.isPresent()
&& ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant()))
return;
lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone)));
}
/**
* Deactivates a locked application without storing it
*
* @return the application with the deployment in the given zone removed
*/
private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
try {
configServer.deactivate(new DeploymentId(application.id(), zone));
}
catch (NoInstanceException ignored) {
}
return application.withoutDeploymentIn(zone);
}
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
private ApplicationId dashToUnderscore(ApplicationId id) {
return ApplicationId.from(id.tenant().value(),
id.application().value().replaceAll("-", "_"),
id.instance().value());
}
public ConfigServerClient configServer() { return configServer; }
/**
* Returns a lock which provides exclusive rights to changing this application.
* Any operation which stores an application need to first acquire this lock, then read, modify
* and store the application, and finally release (close) the lock.
*/
Lock lock(ApplicationId application) {
return curator.lock(application, Duration.ofMinutes(10));
}
/** Verify that each of the production zones listed in the deployment spec exist in this system. */
private void validate(DeploymentSpec deploymentSpec) {
deploymentSpec.zones().stream()
.filter(zone -> zone.environment() == Environment.prod)
.forEach(zone -> {
if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
zone.region().orElse(null)))) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in " +
"this system!");
}
});
}
/** Verify that what we want to deploy is tested and that we aren't downgrading */
public RotationRepository rotationRepository() {
return rotationRepository;
}
/** Sort given list of applications by application ID */
private static List<Application> sort(List<Application> applications) {
return applications.stream().sorted(Comparator.comparing(Application::id)).collect(Collectors.toList());
}
} |
The value of the (old) `documentSelection` previously had one equals sign between, for example, `id.group` and its value (https://github.com/vespa-engine/vespa/blob/177392cd29ab9db2de0471d295cce0d0423ed726/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java#L297). But now there are two equals signs (https://github.com/vespa-engine/vespa/blob/932812ddcd333ce57c0a5d812b12d4f0ced971a5/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java#L324). | private HttpResponse handleVisit(RestUri restUri, HttpRequest request) throws RestApiException {
String documentSelection;
OperationHandler.VisitOptions options;
try {
documentSelection = documentSelectionFromRequest(restUri, request);
options = visitOptionsFromRequest(request);
} catch (BadRequestParameterException e) {
return createInvalidParameterResponse(e.getParameter(), e.getMessage());
}
final OperationHandler.VisitResult visit = operationHandler.visit(restUri, documentSelection, options);
final ObjectNode resultNode = mapper.createObjectNode();
visit.token.ifPresent(t -> resultNode.put(CONTINUATION, t));
resultNode.putArray(DOCUMENTS).addPOJO(visit.documentsAsJsonList);
resultNode.put(PATH_NAME, restUri.getRawPath());
HttpResponse httpResponse = new HttpResponse(200) {
@Override
public String getContentType() { return APPLICATION_JSON; }
@Override
public void render(OutputStream outputStream) throws IOException {
try {
outputStream.write(resultNode.toString().getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
};
return httpResponse;
} | documentSelection = documentSelectionFromRequest(restUri, request); | private HttpResponse handleVisit(RestUri restUri, HttpRequest request) throws RestApiException {
String documentSelection;
OperationHandler.VisitOptions options;
try {
documentSelection = documentSelectionFromRequest(restUri, request);
options = visitOptionsFromRequest(request);
} catch (BadRequestParameterException e) {
return createInvalidParameterResponse(e.getParameter(), e.getMessage());
}
final OperationHandler.VisitResult visit = operationHandler.visit(restUri, documentSelection, options);
final ObjectNode resultNode = mapper.createObjectNode();
visit.token.ifPresent(t -> resultNode.put(CONTINUATION, t));
resultNode.putArray(DOCUMENTS).addPOJO(visit.documentsAsJsonList);
resultNode.put(PATH_NAME, restUri.getRawPath());
HttpResponse httpResponse = new HttpResponse(200) {
@Override
public String getContentType() { return APPLICATION_JSON; }
@Override
public void render(OutputStream outputStream) throws IOException {
try {
outputStream.write(resultNode.toString().getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
};
return httpResponse;
} | class BadRequestParameterException extends IllegalArgumentException {
private String parameter;
BadRequestParameterException(String parameter, String message) {
super(message);
this.parameter = parameter;
}
String getParameter() {
return parameter;
}
} | class BadRequestParameterException extends IllegalArgumentException {
private String parameter;
BadRequestParameterException(String parameter, String message) {
super(message);
this.parameter = parameter;
}
String getParameter() {
return parameter;
}
} |
Ahh yes, sorry 🙂 Basically, the `=` operator in document selection is a multi-purpose operator which for integers degenerates to `==`, but for strings can end up as a glob-style operator that uses regexes behind the scenes. The `==` operator always implies an exact match, which is the semantics we want here. I changed this to ensure we have a deterministic performance profile for comparisons. There was no reason to use `=` here in the first place, though it _does_ functionally work as intended. | private HttpResponse handleVisit(RestUri restUri, HttpRequest request) throws RestApiException {
// Parse the document selection and visit options up front; an invalid parameter
// short-circuits into an error response identifying the offending parameter.
String documentSelection;
OperationHandler.VisitOptions options;
try {
documentSelection = documentSelectionFromRequest(restUri, request);
options = visitOptionsFromRequest(request);
} catch (BadRequestParameterException e) {
return createInvalidParameterResponse(e.getParameter(), e.getMessage());
}
// Run the visit and assemble the JSON result: continuation token (if any), documents, and the raw path.
final OperationHandler.VisitResult visit = operationHandler.visit(restUri, documentSelection, options);
final ObjectNode resultNode = mapper.createObjectNode();
visit.token.ifPresent(t -> resultNode.put(CONTINUATION, t));
resultNode.putArray(DOCUMENTS).addPOJO(visit.documentsAsJsonList);
resultNode.put(PATH_NAME, restUri.getRawPath());
HttpResponse httpResponse = new HttpResponse(200) {
@Override
public String getContentType() { return APPLICATION_JSON; }
@Override
public void render(OutputStream outputStream) throws IOException {
try {
// Serialize the result tree onto the response stream as UTF-8.
outputStream.write(resultNode.toString().getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
};
return httpResponse;
}
} | documentSelection = documentSelectionFromRequest(restUri, request); | private HttpResponse handleVisit(RestUri restUri, HttpRequest request) throws RestApiException {
// Parse the document selection and visit options up front; an invalid parameter
// short-circuits into an error response identifying the offending parameter.
String documentSelection;
OperationHandler.VisitOptions options;
try {
documentSelection = documentSelectionFromRequest(restUri, request);
options = visitOptionsFromRequest(request);
} catch (BadRequestParameterException e) {
return createInvalidParameterResponse(e.getParameter(), e.getMessage());
}
// Run the visit and assemble the JSON result: continuation token (if any), documents, and the raw path.
final OperationHandler.VisitResult visit = operationHandler.visit(restUri, documentSelection, options);
final ObjectNode resultNode = mapper.createObjectNode();
visit.token.ifPresent(t -> resultNode.put(CONTINUATION, t));
resultNode.putArray(DOCUMENTS).addPOJO(visit.documentsAsJsonList);
resultNode.put(PATH_NAME, restUri.getRawPath());
HttpResponse httpResponse = new HttpResponse(200) {
@Override
public String getContentType() { return APPLICATION_JSON; }
@Override
public void render(OutputStream outputStream) throws IOException {
try {
// Serialize the result tree onto the response stream as UTF-8.
outputStream.write(resultNode.toString().getBytes(StandardCharsets.UTF_8));
} catch (Exception e) {
throw new RuntimeException(e);
}
}
};
return httpResponse;
}
} | class BadRequestParameterException extends IllegalArgumentException {
// The request parameter whose value was rejected; echoed back to the client.
private String parameter;
/** Creates an exception describing an invalid value for the named request parameter. */
BadRequestParameterException(String parameter, String message) {
super(message);
this.parameter = parameter;
}
/** Returns the name of the offending request parameter. */
String getParameter() {
return parameter;
}
} | class BadRequestParameterException extends IllegalArgumentException {
// The request parameter whose value was rejected; echoed back to the client.
private String parameter;
/** Creates an exception describing an invalid value for the named request parameter. */
BadRequestParameterException(String parameter, String message) {
super(message);
this.parameter = parameter;
}
/** Returns the name of the offending request parameter. */
String getParameter() {
return parameter;
}
} |
Actually, we could trigger something through `screwdriver/v1 ` that wasn't tested yet. If we cancel the change between triggering, and deployment, this could go wrong. Of course, this won't happen, or at least not until I fix this :p | private void validateChange(Application application, ZoneId zone, Version version) {
// Reject changes which have not passed the test jobs required before this environment.
if ( ! application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) {
throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
" as " + application.change() + " is not tested");
}
// Reject deployments which would downgrade an existing production deployment.
Deployment existingDeployment = application.deployments().get(zone);
if (zone.environment().isProduction() && existingDeployment != null &&
existingDeployment.version().isAfter(version)) {
throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
" as the requested version " + version + " is older than" +
" the current version " + existingDeployment.version());
}
} | if ( ! application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) { | private void validateChange(Application application, ZoneId zone, Version version) {
// Reject changes which have not passed the test jobs required before this environment.
if ( ! application.deploymentJobs().isDeployableTo(zone.environment(), application.change())) {
throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
" as " + application.change() + " is not tested");
}
// Reject deployments which would downgrade an existing production deployment.
Deployment existingDeployment = application.deployments().get(zone);
if (zone.environment().isProduction() && existingDeployment != null &&
existingDeployment.version().isAfter(version)) {
throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
" as the requested version " + version + " is older than" +
" the current version " + existingDeployment.version());
}
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For permanent storage */
private final ControllerDb db;
/** For working memory storage and sharing between controllers */
private final CuratorDb curator;
// Source of application packages for pipeline (non-direct) deployments.
private final ArtifactRepository artifactRepository;
// Assigns and tracks global rotations for applications.
private final RotationRepository rotationRepository;
// Factory for ZMS clients, used to mirror application creation/deletion into Athenz.
private final AthenzClientFactory zmsClientFactory;
// DNS service used to register rotation CNAME records.
private final NameService nameService;
private final ConfigServerClient configServer;
private final RoutingGenerator routingGenerator;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
NameService nameService, ConfigServerClient configServer,
ArtifactRepository artifactRepository,
RoutingGenerator routingGenerator, BuildService buildService, Clock clock) {
this.controller = controller;
this.db = db;
this.curator = curator;
this.zmsClientFactory = zmsClientFactory;
this.nameService = nameService;
this.configServer = configServer;
this.routingGenerator = routingGenerator;
this.clock = clock;
this.artifactRepository = artifactRepository;
this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
this.deploymentTrigger = new DeploymentTrigger(controller, curator, buildService, clock);
// Re-store every application under lock at startup — presumably to rewrite stored
// data to the current serialization format; TODO confirm intent.
for (Application application : db.listApplications()) {
lockIfPresent(application.id(), this::store);
}
}
/** Returns the application with the given id, or empty if it is not present */
public Optional<Application> get(ApplicationId id) {
return db.getApplication(id);
}
/**
* Returns the application with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Application require(ApplicationId id) {
return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications, sorted by application id */
public List<Application> asList() {
return sort(db.listApplications());
}
/** Returns all applications of a tenant, sorted by application id */
public List<Application> asList(TenantName tenant) {
return sort(db.listApplications(tenant));
}
/**
 * Sets the rotations marked as 'global' either 'in' or 'out of' service.
 *
 * @return the canonical endpoints altered, if any
 * @throws IOException if rotation status cannot be updated
 */
public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
    List<String> alteredEndpoints = new ArrayList<>();
    Optional<String> canonicalEndpoint = getCanonicalGlobalEndpoint(deploymentId);
    if (canonicalEndpoint.isPresent()) {
        String endpoint = canonicalEndpoint.get();
        configServer.setGlobalRotationStatus(deploymentId, endpoint, status);
        alteredEndpoints.add(endpoint);
    }
    return alteredEndpoints;
}
/**
 * Gets the endpoint status for the global endpoint of this application.
 *
 * @return map from endpoint to its rotation status
 * @throws IOException if global rotation status cannot be determined
 */
public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
    Map<String, EndpointStatus> statusByEndpoint = new HashMap<>();
    Optional<String> canonicalEndpoint = getCanonicalGlobalEndpoint(deploymentId);
    if (canonicalEndpoint.isPresent()) {
        String endpoint = canonicalEndpoint.get();
        statusByEndpoint.put(endpoint, configServer.getGlobalRotationStatus(deploymentId, endpoint));
    }
    return statusByEndpoint;
}
/**
 * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
 * This method finds that one service endpoint and strips the URI part that
 * the routingGenerator is wrapping around the endpoint.
 *
 * @param deploymentId The deployment to retrieve global service endpoint for
 * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
 * @throws IOException if an endpoint has no host part or is not a valid URI
 */
Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
    Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
    Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
    for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
        try {
            URI uri = new URI(endpoint.getEndpoint());
            String serviceEndpoint = uri.getHost();
            if (serviceEndpoint == null) {
                throw new IOException("Unexpected endpoints returned from the Routing Generator");
            }
            // Strip the public DNS suffix as a literal string. This was previously replaceAll,
            // which interprets its first argument as a regex where '.' matches any character,
            // so e.g. "Xvespa-yahooapisYcom" would also have been stripped.
            String canonicalEndpoint = serviceEndpoint.replace(".vespa.yahooapis.com", "");
            String hostname = endpoint.getHostname();
            if (hostname != null) {
                if (endpoint.isGlobal()) {
                    hostToGlobalEndpoint.put(hostname, endpoint);
                } else {
                    hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                }
                // A host seen with both a global and a non-global endpoint identifies the service endpoint.
                if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                    return Optional.of(hostToCanonicalEndpoint.get(hostname));
                }
            }
        } catch (URISyntaxException use) {
            throw new IOException(use);
        }
    }
    return Optional.empty();
}
/**
* Creates a new application for an existing tenant.
*
* @throws IllegalArgumentException if the application already exists
*/
public Application createApplication(ApplicationId id, Optional<NToken> token) {
// Instance names are restricted to 'default' and PR-number instances.
if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
try (Lock lock = lock(id)) {
// Validate the application name only when no instance of it exists yet for this tenant.
if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
Optional<Tenant> tenant = controller.tenants().tenant(id.tenant());
if ( ! tenant.isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
if (get(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
// Also reject names colliding after dash-to-underscore normalization.
if (get(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
// For the default instance of an Athenz tenant, mirror the application into the Athenz domain.
if (id.instance().isDefault() && tenant.get() instanceof AthenzTenant) {
if ( ! token.isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
zmsClient.addApplication(((AthenzTenant) tenant.get()).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
}
LockedApplication application = new LockedApplication(new Application(id), lock);
store(application);
log.info("Created " + application);
return application;
}
}
/** Deploys an application. If the application does not exist it is created. */
public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
DeployOptions options) {
try (Lock lock = lock(applicationId)) {
LockedApplication application = get(applicationId)
.map(app -> new LockedApplication(app, lock))
.orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock));
// Deployments without a screwdriver build job, and manual-environment deployments, bypass the pipeline.
boolean canDeployDirectly = ! options.screwdriverBuildJob.map(job1 -> job1.screwdriverId).isPresent()
|| zone.environment().isManuallyDeployed();
boolean preferOldestVersion = options.deployCurrentVersion;
Version platformVersion;
ApplicationVersion applicationVersion;
ApplicationPackage applicationPackage;
if (canDeployDirectly) {
// Direct deployments must supply their own application package.
platformVersion = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
applicationVersion = ApplicationVersion.unknown;
applicationPackage = applicationPackageFromDeployer.orElseThrow(
() -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
} else {
// Pipeline deployments use the versions of the last triggered run and fetch the package
// from the artifact repository; a deployment no job triggered is rejected with a warning.
JobType jobType = JobType.from(controller.system(), zone)
.orElseThrow(() -> new IllegalArgumentException("No job found for zone " + zone));
Optional<JobStatus.JobRun> triggered = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
.flatMap(JobStatus::lastTriggered);
if ( ! triggered.isPresent())
return unexpectedDeployment(applicationId, zone);
platformVersion = preferOldestVersion
? application.oldestDeployedPlatform().orElse(controller.systemVersion())
: triggered.get().version();
applicationVersion = preferOldestVersion
? application.oldestDeployedApplication().orElse(triggered.get().applicationVersion())
: triggered.get().applicationVersion();
applicationPackage = new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), applicationVersion.id()));
}
// Ensure all production zones referenced by the package exist in this system.
validate(applicationPackage.deploymentSpec());
// Update stored state from the package, unless we are redeploying the current (older) version.
if ( ! preferOldestVersion) {
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
application = deleteRemovedDeployments(application);
application = deleteUnreferencedDeploymentJobs(application);
store(application);
}
// Pipeline deployments must deploy a tested change and may not downgrade production.
if ( ! canDeployDirectly) {
validateChange(application, zone, platformVersion);
}
// Assign a global rotation if eligible, and collect rotation names and CNAMEs for the prepare call.
application = withRotation(application, zone);
Set<String> rotationNames = new HashSet<>();
Set<String> cnames = new HashSet<>();
application.rotation().ifPresent(applicationRotation -> {
rotationNames.add(applicationRotation.id().asString());
cnames.add(applicationRotation.dnsName());
cnames.add(applicationRotation.secureDnsName());
});
options = withVersion(platformVersion, options);
ConfigServerClient.PreparedApplication preparedApplication;
DeploymentId deploymentId = new DeploymentId(applicationId, zone);
// In the CD system, user applications deploy (prepare+activate) in one call; otherwise prepare, then activate.
if (controller.system().equals(SystemName.cd) && deploymentId.applicationId().tenant().value().startsWith(Tenant.userPrefix)) {
preparedApplication = configServer.deploy(deploymentId, options, cnames, rotationNames, applicationPackage.zippedContent());
} else {
preparedApplication = configServer.prepare(deploymentId, options, cnames, rotationNames, applicationPackage.zippedContent());
preparedApplication.activate();
}
// Record the new deployment and persist before returning.
application = application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant());
store(application);
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
}
}
/** Makes sure the application has a global rotation, if eligible. */
private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
// Only production deployments of applications declaring a global service id get a rotation.
if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) {
try (RotationLock rotationLock = rotationRepository.lock()) {
Rotation rotation = rotationRepository.getRotation(application, rotationLock);
application = application.with(rotation.id());
// Stored before DNS registration — presumably so the assignment is recorded even if DNS fails; confirm.
store(application);
registerRotationInDns(rotation, application.rotation().get().dnsName());
registerRotationInDns(rotation, application.rotation().get().secureDnsName());
}
}
return application;
}
/** Builds a warning result for a deployment request that no job has triggered. */
private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone) {
Log logEntry = new Log();
logEntry.level = "WARNING";
logEntry.time = clock.instant().toEpochMilli();
logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone +
" as a deployment is not currently expected";
PrepareResponse prepareResponse = new PrepareResponse();
prepareResponse.log = Collections.singletonList(logEntry);
prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
// Dummy revision id and zero package size signal that nothing was deployed.
return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
}
/**
 * Deactivates production deployments in zones no longer listed in the deployment spec.
 * Removal must be explicitly allowed by a deployment-removal validation override.
 */
private LockedApplication deleteRemovedDeployments(LockedApplication application) {
List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
.filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
Optional.of(deployment.zone().region())))
.collect(Collectors.toList());
if (deploymentsToRemove.isEmpty()) return application;
// Require an explicit validation override before removing live production deployments.
if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
" is deployed in " +
deploymentsToRemove.stream()
.map(deployment -> deployment.zone().region().value())
.collect(Collectors.joining(", ")) +
", but does not include " +
(deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
" in deployment.xml");
LockedApplication applicationWithRemoval = application;
for (Deployment deployment : deploymentsToRemove)
applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
return applicationWithRemoval;
}
/** Removes job status for production jobs whose zone is no longer in the deployment spec. */
private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
// Note: iterates the job set of the incoming application while 'application' is reassigned in the loop body.
for (JobType job : application.deploymentJobs().jobStatus().keySet()) {
Optional<ZoneId> zone = job.zone(controller.system());
// Keep non-production jobs, and production jobs whose zone is still included in the spec.
if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region))))
continue;
application = application.withoutDeploymentJob(job);
}
return application;
}
/** Returns a copy of the given options with the platform version pinned to the given version. */
private DeployOptions withVersion(Version version, DeployOptions options) {
return new DeployOptions(options.screwdriverBuildJob,
Optional.of(version),
options.ignoreValidationErrors,
options.deployCurrentVersion);
}
/**
 * Registers (or refreshes) a CNAME record mapping the given DNS name to the rotation.
 * DNS failures are logged, not propagated.
 */
private void registerRotationInDns(Rotation rotation, String dnsName) {
    try {
        RecordData rotationName = RecordData.fqdn(rotation.name());
        Optional<Record> existing = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName));
        if ( ! existing.isPresent()) {
            RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
            log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '"
                     + rotation.name() + "'");
            return;
        }
        // Record exists: update only when it points somewhere else.
        if ( ! existing.get().data().equals(rotationName)) {
            nameService.updateRecord(existing.get().id(), rotationName);
            log.info("Updated mapping for record ID " + existing.get().id().asString() + ": '" + dnsName
                     + "' -> '" + rotation.name() + "'");
        }
    } catch (RuntimeException e) {
        log.log(Level.WARNING, "Failed to register CNAME", e);
    }
}
/** Returns the endpoints of the deployment, or empty if the request fails */
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
    try {
        ImmutableList.Builder<URI> endpoints = ImmutableList.builder();
        for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId))
            endpoints.add(URI.create(endpoint.getEndpoint()));
        return Optional.of(endpoints.build());
    }
    catch (RuntimeException e) {
        log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
                + Exceptions.toMessageString(e));
        return Optional.empty();
    }
}
/**
* Deletes the the given application. All known instances of the applications will be deleted,
* including PR instances.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
* @throws NotExistsException if no instances of the application exist
*/
public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) {
// Find every instance (including PR instances) sharing this tenant and application name.
List<ApplicationId> instances = controller.applications().asList(applicationId.tenant())
.stream()
.map(Application::id)
.filter(id -> id.application().equals(applicationId.application()) &&
id.tenant().equals(applicationId.tenant()))
.collect(Collectors.toList());
if (instances.isEmpty()) {
throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
}
instances.forEach(id -> lockOrThrow(id, application -> {
// Refuse deletion while any deployment is still active.
if ( ! application.deployments().isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
Tenant tenant = controller.tenants().tenant(id.tenant()).get();
if (tenant instanceof AthenzTenant && ! token.isPresent())
throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");
// For the default instance of an Athenz tenant, also remove the application from the Athenz domain.
if (id.instance().isDefault() && tenant instanceof AthenzTenant) {
zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
.deleteApplication(((AthenzTenant) tenant).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
}
db.deleteApplication(id);
log.info("Deleted " + application);
}));
}
/**
* Replace any previous version of this application by this instance
*
* @param application a locked application to store
*/
public void store(LockedApplication application) {
// The LockedApplication type guarantees the caller holds the application lock.
db.store(application);
}
/**
* Acquire a locked application to modify and store, if there is an application with the given id.
*
* @param applicationId ID of the application to lock and get.
* @param action Function which acts on the locked application.
*/
public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
}
}
/**
* Acquire a locked application to modify and store, or throw an exception if no application has the given id.
*
* @param applicationId ID of the application to lock and require.
* @param action Function which acts on the locked application.
* @throws IllegalArgumentException when application does not exist.
*/
public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
action.accept(new LockedApplication(require(applicationId), lock));
}
}
/**
* Tells config server to schedule a restart of all nodes in this deployment
*
* @param hostname If non-empty, restart will only be scheduled for this host
* @throws IllegalArgumentException if the deployment does not exist
*/
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
try {
configServer.restart(deploymentId, hostname);
}
catch (NoInstanceException e) {
throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
}
}
/** Deactivate application in the given zone */
public void deactivate(Application application, ZoneId zone) {
deactivate(application, zone, Optional.empty(), false);
}
/** Deactivate a known deployment of the given application */
public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) {
deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired);
}
/** Deactivates under lock and stores the result; optionally a no-op unless the deployment has expired. */
private void deactivate(Application application, ZoneId zone, Optional<Deployment> deployment,
boolean requireThatDeploymentHasExpired) {
if (requireThatDeploymentHasExpired && deployment.isPresent()
&& ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant()))
return;
lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone)));
}
/**
* Deactivates a locked application without storing it
*
* @return the application with the deployment in the given zone removed
*/
private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
try {
configServer.deactivate(new DeploymentId(application.id(), zone));
}
// Already gone on the config server: still remove it from our bookkeeping below.
catch (NoInstanceException ignored) {
}
return application.withoutDeploymentIn(zone);
}
/** Returns the deployment trigger owned by this */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/** Returns the given id with dashes in the application name replaced by underscores. */
private ApplicationId dashToUnderscore(ApplicationId id) {
return ApplicationId.from(id.tenant().value(),
id.application().value().replaceAll("-", "_"),
id.instance().value());
}
/** Returns the config server client used by this */
public ConfigServerClient configServer() { return configServer; }
/**
* Returns a lock which provides exclusive rights to changing this application.
* Any operation which stores an application need to first acquire this lock, then read, modify
* and store the application, and finally release (close) the lock.
*/
Lock lock(ApplicationId application) {
return curator.lock(application, Duration.ofMinutes(10));
}
/** Verify that each of the production zones listed in the deployment spec exist in this system. */
private void validate(DeploymentSpec deploymentSpec) {
deploymentSpec.zones().stream()
.filter(zone -> zone.environment() == Environment.prod)
.forEach(zone -> {
if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
zone.region().orElse(null)))) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in " +
"this system!");
}
});
}
/** Returns the rotation repository which manages global rotation assignments. */
public RotationRepository rotationRepository() {
return rotationRepository;
}
/** Sort given list of applications by application ID */
private static List<Application> sort(List<Application> applications) {
return applications.stream().sorted(Comparator.comparing(Application::id)).collect(Collectors.toList());
}
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For permanent storage */
private final ControllerDb db;
/** For working memory storage and sharing between controllers */
private final CuratorDb curator;
// Source of application packages for pipeline (non-direct) deployments.
private final ArtifactRepository artifactRepository;
// Assigns and tracks global rotations for applications.
private final RotationRepository rotationRepository;
// Factory for ZMS clients, used to mirror application creation/deletion into Athenz.
private final AthenzClientFactory zmsClientFactory;
// DNS service used to register rotation CNAME records.
private final NameService nameService;
private final ConfigServerClient configServer;
private final RoutingGenerator routingGenerator;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
ApplicationController(Controller controller, ControllerDb db, CuratorDb curator,
AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
NameService nameService, ConfigServerClient configServer,
ArtifactRepository artifactRepository,
RoutingGenerator routingGenerator, BuildService buildService, Clock clock) {
this.controller = controller;
this.db = db;
this.curator = curator;
this.zmsClientFactory = zmsClientFactory;
this.nameService = nameService;
this.configServer = configServer;
this.routingGenerator = routingGenerator;
this.clock = clock;
this.artifactRepository = artifactRepository;
this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
this.deploymentTrigger = new DeploymentTrigger(controller, curator, buildService, clock);
// Re-store every application under lock at startup — presumably to rewrite stored
// data to the current serialization format; TODO confirm intent.
for (Application application : db.listApplications()) {
lockIfPresent(application.id(), this::store);
}
}
/** Returns the application with the given id, or empty if it is not present */
public Optional<Application> get(ApplicationId id) {
return db.getApplication(id);
}
/**
* Returns the application with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Application require(ApplicationId id) {
return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications, sorted by application id */
public List<Application> asList() {
return sort(db.listApplications());
}
/** Returns all applications of a tenant, sorted by application id */
public List<Application> asList(TenantName tenant) {
return sort(db.listApplications(tenant));
}
/**
* Set the rotations marked as 'global' either 'in' or 'out of' service.
*
* @return The canonical endpoint altered if any
* @throws IOException if rotation status cannot be updated
*/
public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
List<String> rotations = new ArrayList<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
// At most one canonical global endpoint exists per deployment.
if (endpoint.isPresent()) {
configServer.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
rotations.add(endpoint.get());
}
return rotations;
}
/**
* Get the endpoint status for the global endpoint of this application
*
* @return Map between the endpoint and the rotation status
* @throws IOException if global rotation status cannot be determined
*/
public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
Map<String, EndpointStatus> result = new HashMap<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
// At most one canonical global endpoint exists per deployment.
if (endpoint.isPresent()) {
EndpointStatus status = configServer.getGlobalRotationStatus(deploymentId, endpoint.get());
result.put(endpoint.get(), status);
}
return result;
}
/**
 * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
 * This method finds that one service endpoint and strips the URI part that
 * the routingGenerator is wrapping around the endpoint.
 *
 * @param deploymentId The deployment to retrieve global service endpoint for
 * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
 * @throws IOException if an endpoint has no host part or is not a valid URI
 */
Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
    Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
    Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
    for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
        try {
            URI uri = new URI(endpoint.getEndpoint());
            String serviceEndpoint = uri.getHost();
            if (serviceEndpoint == null) {
                throw new IOException("Unexpected endpoints returned from the Routing Generator");
            }
            // Strip the public DNS suffix as a literal string. This was previously replaceAll,
            // which interprets its first argument as a regex where '.' matches any character,
            // so e.g. "Xvespa-yahooapisYcom" would also have been stripped.
            String canonicalEndpoint = serviceEndpoint.replace(".vespa.yahooapis.com", "");
            String hostname = endpoint.getHostname();
            if (hostname != null) {
                if (endpoint.isGlobal()) {
                    hostToGlobalEndpoint.put(hostname, endpoint);
                } else {
                    hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                }
                // A host seen with both a global and a non-global endpoint identifies the service endpoint.
                if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                    return Optional.of(hostToCanonicalEndpoint.get(hostname));
                }
            }
        } catch (URISyntaxException use) {
            throw new IOException(use);
        }
    }
    return Optional.empty();
}
/**
* Creates a new application for an existing tenant.
*
* @throws IllegalArgumentException if the application already exists
*/
public Application createApplication(ApplicationId id, Optional<NToken> token) {
// Instance names are restricted to 'default' and PR-number instances.
if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
try (Lock lock = lock(id)) {
// Validate the application name only when no instance of it exists yet for this tenant.
if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
Optional<Tenant> tenant = controller.tenants().tenant(id.tenant());
if ( ! tenant.isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
if (get(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
// Also reject names colliding after dash-to-underscore normalization.
if (get(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
// For the default instance of an Athenz tenant, mirror the application into the Athenz domain.
if (id.instance().isDefault() && tenant.get() instanceof AthenzTenant) {
if ( ! token.isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
zmsClient.addApplication(((AthenzTenant) tenant.get()).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
}
LockedApplication application = new LockedApplication(new Application(id), lock);
store(application);
log.info("Created " + application);
return application;
}
}
/**
 * Deploys an application package to a zone. If the application does not exist it is created.
 *
 * @param applicationId the application to deploy
 * @param zone the target zone
 * @param applicationPackageFromDeployer package supplied directly by the deployer; mandatory for
 *                                       direct deployments, ignored for pipeline deployments
 * @param options deployment options; a platform version is filled in before the config server call
 * @return the result of activating the prepared application
 */
public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone,
                                        Optional<ApplicationPackage> applicationPackageFromDeployer,
                                        DeployOptions options) {
    try (Lock lock = lock(applicationId)) {
        // Create the application on first deployment
        LockedApplication application = get(applicationId)
                .map(app -> new LockedApplication(app, lock))
                .orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock));
        // A deployment is "direct" when it is not tied to a Screwdriver build job,
        // or targets a manually deployed environment.
        boolean canDeployDirectly = ! options.screwdriverBuildJob.map(job1 -> job1.screwdriverId).isPresent()
                                    || zone.environment().isManuallyDeployed();
        boolean preferOldestVersion = options.deployCurrentVersion;
        // Decide which platform version, application version and package to deploy
        Version platformVersion;
        ApplicationVersion applicationVersion;
        ApplicationPackage applicationPackage;
        if (canDeployDirectly) {
            platformVersion = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
            applicationVersion = ApplicationVersion.unknown;
            applicationPackage = applicationPackageFromDeployer.orElseThrow(
                    () -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
        } else {
            JobType jobType = JobType.from(controller.system(), zone)
                                     .orElseThrow(() -> new IllegalArgumentException("No job found for zone " + zone));
            Optional<JobStatus.JobRun> triggered = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
                                                           .flatMap(JobStatus::lastTriggered);
            // Pipeline deployments must have been triggered through the deployment orchestration
            if ( ! triggered.isPresent())
                return unexpectedDeployment(applicationId, zone);
            platformVersion = preferOldestVersion
                    ? application.oldestDeployedPlatform().orElse(controller.systemVersion())
                    : triggered.get().version();
            applicationVersion = preferOldestVersion
                    ? application.oldestDeployedApplication().orElse(triggered.get().applicationVersion())
                    : triggered.get().applicationVersion();
            applicationPackage = new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), applicationVersion.id()));
        }
        validate(applicationPackage.deploymentSpec());
        // Update stored application with information from the package — but not when redeploying
        // the currently deployed (oldest) versions, which should not advance any state.
        if ( ! preferOldestVersion) {
            application = application.with(applicationPackage.deploymentSpec());
            application = application.with(applicationPackage.validationOverrides());
            application = deleteRemovedDeployments(application);
            application = deleteUnreferencedDeploymentJobs(application);
            store(application); // persist even if the deployment below fails
        }
        if ( ! canDeployDirectly) {
            validateChange(application, zone, platformVersion);
        }
        application = withRotation(application, zone);
        // Collect rotation names and CNAMEs to hand to the config server
        Set<String> rotationNames = new HashSet<>();
        Set<String> cnames = new HashSet<>();
        application.rotation().ifPresent(applicationRotation -> {
            rotationNames.add(applicationRotation.id().asString());
            cnames.add(applicationRotation.dnsName());
            cnames.add(applicationRotation.secureDnsName());
        });
        options = withVersion(platformVersion, options);
        // Carry out the deployment. NOTE(review): user-tenant applications in the CD system use the
        // combined deploy call; everything else prepares and then activates — confirm this split is intended.
        ConfigServerClient.PreparedApplication preparedApplication;
        DeploymentId deploymentId = new DeploymentId(applicationId, zone);
        if (controller.system().equals(SystemName.cd) && deploymentId.applicationId().tenant().value().startsWith(Tenant.userPrefix)) {
            preparedApplication = configServer.deploy(deploymentId, options, cnames, rotationNames, applicationPackage.zippedContent());
        } else {
            preparedApplication = configServer.prepare(deploymentId, options, cnames, rotationNames, applicationPackage.zippedContent());
            preparedApplication.activate();
        }
        application = application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant());
        store(application);
        return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                  applicationPackage.zippedContent().length);
    }
}
/**
 * Ensures the application has a global rotation when it is eligible: a prod zone deployment
 * of an application whose deployment spec declares a global service id.
 *
 * @return the application, updated with a rotation id if one was assigned
 */
private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
    boolean eligible =    zone.environment() == Environment.prod
                       && application.deploymentSpec().globalServiceId().isPresent();
    if ( ! eligible) return application;
    try (RotationLock rotationLock = rotationRepository.lock()) {
        Rotation rotation = rotationRepository.getRotation(application, rotationLock);
        application = application.with(rotation.id());
        store(application); // persist the rotation assignment before touching DNS
        registerRotationInDns(rotation, application.rotation().get().dnsName());
        registerRotationInDns(rotation, application.rotation().get().secureDnsName());
    }
    return application;
}
/**
 * Builds a no-op activation result used when a pipeline deployment arrives without having
 * been triggered: nothing is deployed, and a warning entry explains why.
 */
private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone) {
    Log warning = new Log();
    warning.level = "WARNING";
    warning.time = clock.instant().toEpochMilli();
    warning.message = "Ignoring deployment of " + require(applicationId) + " to " + zone +
                      " as a deployment is not currently expected";
    PrepareResponse response = new PrepareResponse();
    response.log = Collections.singletonList(warning);
    response.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
    // RevisionId "0" marks a synthetic, non-deployed revision
    return new ActivateResult(new RevisionId("0"), response, 0);
}
/**
 * Deactivates production deployments in zones which are no longer present in the deployment spec,
 * provided the 'deployment-removal' validation override currently allows it.
 *
 * @return the application with the removed deployments deactivated
 * @throws IllegalArgumentException if deployments would be removed without an active override
 */
private LockedApplication deleteRemovedDeployments(LockedApplication application) {
    List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
            .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
                                                                          Optional.of(deployment.zone().region())))
            .collect(Collectors.toList());
    if (deploymentsToRemove.isEmpty()) return application;
    if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
        throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
                                           " is deployed in " +
                                           deploymentsToRemove.stream()
                                                              .map(deployment -> deployment.zone().region().value())
                                                              .collect(Collectors.joining(", ")) +
                                           ", but does not include " +
                                           (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                           " in deployment.xml");
    // Each deactivate call returns a new application instance; thread it through the loop
    LockedApplication applicationWithRemoval = application;
    for (Deployment deployment : deploymentsToRemove)
        applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
    return applicationWithRemoval;
}
/**
 * Removes status for production jobs whose zone is no longer referenced by the deployment spec.
 * Non-production jobs, and production jobs whose zone is still included, are kept.
 */
private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
    for (JobType job : application.deploymentJobs().jobStatus().keySet()) {
        Optional<ZoneId> zone = job.zone(controller.system());
        boolean stillReferenced =    ! job.isProduction()
                                  || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)));
        if ( ! stillReferenced)
            application = application.withoutDeploymentJob(job);
    }
    return application;
}
/** Returns a copy of the given options with the platform version set to the given version. */
private DeployOptions withVersion(Version version, DeployOptions options) {
    return new DeployOptions(options.screwdriverBuildJob, Optional.of(version),
                             options.ignoreValidationErrors, options.deployCurrentVersion);
}
/**
 * Registers (or updates) a CNAME record mapping the given DNS name to the rotation.
 * Failures are logged and swallowed — DNS registration is best-effort and must not
 * fail the deployment.
 */
private void registerRotationInDns(Rotation rotation, String dnsName) {
    try {
        Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName));
        RecordData rotationName = RecordData.fqdn(rotation.name());
        if (record.isPresent()) {
            // Update only when the existing record points somewhere else
            if ( ! record.get().data().equals(rotationName)) {
                nameService.updateRecord(record.get().id(), rotationName);
                log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName
                         + "' -> '" + rotation.name() + "'");
            }
        } else {
            RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
            log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '"
                     + rotation.name() + "'");
        }
    } catch (RuntimeException e) {
        // Deliberately best-effort: log and continue
        log.log(Level.WARNING, "Failed to register CNAME", e);
    }
}
/** Returns the endpoints of the deployment, or empty if the routing layer request fails. */
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
    try {
        List<URI> endpoints = new ArrayList<>();
        for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId))
            endpoints.add(URI.create(endpoint.getEndpoint()));
        return Optional.of(ImmutableList.copyOf(endpoints));
    }
    catch (RuntimeException e) {
        log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
                               + Exceptions.toMessageString(e));
        return Optional.empty();
    }
}
/**
 * Deletes the given application. All known instances of the application will be deleted,
 * including PR instances.
 *
 * @param applicationId the application to delete; all instances under the same tenant and
 *                      application name are removed
 * @param token NToken authorizing Athenz removal; required when any instance belongs to an Athenz tenant
 * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
 * @throws NotExistsException if no instances of the application exist
 */
public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) {
    // Find every instance matching this tenant + application name (instance name ignored)
    List<ApplicationId> instances = controller.applications().asList(applicationId.tenant())
                                              .stream()
                                              .map(Application::id)
                                              .filter(id -> id.application().equals(applicationId.application()) &&
                                                            id.tenant().equals(applicationId.tenant()))
                                              .collect(Collectors.toList());
    if (instances.isEmpty()) {
        throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
    }
    instances.forEach(id -> lockOrThrow(id, application -> {
        if ( ! application.deployments().isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
        Tenant tenant = controller.tenants().tenant(id.tenant()).get();
        if (tenant instanceof AthenzTenant && ! token.isPresent())
            throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");
        // Only the default instance is registered in Athenz, so only it needs removal there
        if (id.instance().isDefault() && tenant instanceof AthenzTenant) {
            zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
                            .deleteApplication(((AthenzTenant) tenant).domain(),
                                               new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        db.deleteApplication(id);
        log.info("Deleted " + application);
    }));
}
/**
 * Replaces any previous version of this application by this instance.
 *
 * @param application a locked application to store; holding the lock guarantees no concurrent writer
 */
public void store(LockedApplication application) {
    db.store(application);
}
/**
 * Acquires a locked application to modify and store, if there is an application with the given id.
 * Does nothing if the application does not exist.
 *
 * @param applicationId ID of the application to lock and get.
 * @param action Function which acts on the locked application.
 */
public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
    }
}
/**
 * Acquires a locked application to modify and store, or throws an exception if no application has the given id.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
 */
public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        action.accept(new LockedApplication(require(applicationId), lock));
    }
}
/**
 * Tells the config server to schedule a restart of all nodes in this deployment.
 *
 * @param hostname If non-empty, restart will only be scheduled for this host
 * @throws IllegalArgumentException if the deployment does not exist on the config server
 */
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
    try {
        configServer.restart(deploymentId, hostname);
    }
    catch (NoInstanceException e) {
        // Translate the config-server exception into the API-level exception callers expect
        throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
    }
}
/** Deactivates the application in the given zone, unconditionally. */
public void deactivate(Application application, ZoneId zone) {
    deactivate(application, zone, Optional.empty(), false);
}
/** Deactivates a known deployment of the given application, optionally only if it has expired. */
public void deactivate(Application application, Deployment deployment, boolean requireThatDeploymentHasExpired) {
    deactivate(application, deployment.zone(), Optional.of(deployment), requireThatDeploymentHasExpired);
}
// Common deactivation path: optionally skips deployments which have not yet expired,
// then deactivates under the application lock and stores the result.
private void deactivate(Application application, ZoneId zone, Optional<Deployment> deployment,
                        boolean requireThatDeploymentHasExpired) {
    if (requireThatDeploymentHasExpired && deployment.isPresent()
        && ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant()))
        return;
    lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone)));
}
/**
 * Deactivates a locked application without storing it.
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
    try {
        configServer.deactivate(new DeploymentId(application.id(), zone));
    }
    catch (NoInstanceException ignored) {
        // Already gone on the config server — removing our record below is still correct
    }
    return application.withoutDeploymentIn(zone);
}
/** Returns the deployment trigger driving this controller's deployment orchestration. */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
// Returns the id with dashes in the application name replaced by underscores,
// used to detect name collisions between the two spellings.
private ApplicationId dashToUnderscore(ApplicationId id) {
    return ApplicationId.from(id.tenant().value(),
                              id.application().value().replaceAll("-", "_"),
                              id.instance().value());
}
/** Returns the config server client used by this controller. */
public ConfigServerClient configServer() { return configServer; }
/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application needs to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
 */
Lock lock(ApplicationId application) {
    return curator.lock(application, Duration.ofMinutes(10));
}
/** Verifies that each of the production zones listed in the deployment spec exists in this system. */
private void validate(DeploymentSpec deploymentSpec) {
    deploymentSpec.zones().stream()
                  .filter(zone -> zone.environment() == Environment.prod)
                  .filter(zone -> ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
                                                                                  zone.region().orElse(null))))
                  .findFirst() // fail on the first unknown zone, as the original forEach did
                  .ifPresent(zone -> {
                      throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in " +
                                                         "this system!");
                  });
}
// NOTE(review): the javadoc here previously read "Verify that what we want to deploy is tested
// and that we aren't downgrading", which describes validateChange, not this accessor.
/** Returns the repository managing global rotation assignments. */
public RotationRepository rotationRepository() {
    return rotationRepository;
}
/** Sort given list of applications by application ID */
private static List<Application> sort(List<Application> applications) {
    return applications.stream().sorted(Comparator.comparing(Application::id)).collect(Collectors.toList());
}
} |
Double space in this message; remove the trailing space on the previous line or the leading space on this line. | public void app_without_athenz_in_deployment_fails_validation() throws Exception {
exceptionRule.expect(IllegalArgumentException.class);
exceptionRule.expectMessage(
"Container cluster 'default' uses a secret store, so an Athenz domain and " +
" an Athenz service must be declared in deployment.xml.");
DeployState deployState = deployState(servicesXml(), deploymentXml(false));
VespaModel model = new VespaModel(new NullConfigModelRegistry(), deployState);
new SecretStoreValidator().validate(model, deployState);
} | " an Athenz service must be declared in deployment.xml."); | public void app_without_athenz_in_deployment_fails_validation() throws Exception {
exceptionRule.expect(IllegalArgumentException.class);
exceptionRule.expectMessage(
"Container cluster 'default' uses a secret store, so an Athenz domain and" +
" an Athenz service must be declared in deployment.xml.");
DeployState deployState = deployState(servicesXml(), deploymentXml(false));
VespaModel model = new VespaModel(new NullConfigModelRegistry(), deployState);
new SecretStoreValidator().validate(model, deployState);
} | class SecretStoreValidatorTest {
// Rule capturing expected exceptions in the negative-path tests below
@Rule
public final ExpectedException exceptionRule = ExpectedException.none();
/** Returns a services.xml where the container cluster declares a secret store. */
private static String servicesXml() {
    return joinLines("<services version='1.0'>",
                     "  <container id='default' version='1.0'>",
                     "    <secret-store>",
                     "      <group name='group1' environment='prod'/>",
                     "    </secret-store>",
                     "  </container>",
                     "</services>");
}
/** Returns a deployment.xml, with or without an Athenz domain and service declared. */
private static String deploymentXml(boolean addAthenz) {
    return joinLines("<deployment version='1.0' " + (addAthenz ?
                     "athenz-domain='domain' athenz-service='service'" : "") + ">",
                     "  <prod />",
                     "</deployment>");
}
// A secret store is accepted when the Athenz domain and service are declared
@Test
public void app_with_athenz_in_deployment_passes_validation() throws Exception {
    DeployState deployState = deployState(servicesXml(), deploymentXml(true));
    VespaModel model = new VespaModel(new NullConfigModelRegistry(), deployState);
    new SecretStoreValidator().validate(model, deployState);
}
// FIX(review): the @Test annotation was duplicated ("@Test" on two consecutive lines),
// which does not compile — JUnit's @Test is not @Repeatable. Keep a single occurrence.
/** An application without a secret store needs no Athenz declaration in deployment.xml. */
@Test
public void app_without_secret_store_passes_validation_without_athenz_in_deployment() throws Exception {
    String servicesXml = joinLines("<services version='1.0'>",
                                   "  <container id='default' version='1.0' />",
                                   "</services>");
    DeployState deployState = deployState(servicesXml, deploymentXml(false));
    VespaModel model = new VespaModel(new NullConfigModelRegistry(), deployState);
    new SecretStoreValidator().validate(model, deployState);
}
/** Builds a hosted prod DeployState from the given services.xml and deployment.xml. */
private static DeployState deployState(String servicesXml, String deploymentXml) {
    ApplicationPackage app = new MockApplicationPackage.Builder()
            .withServices(servicesXml)
            .withDeploymentSpec(deploymentXml)
            .build();
    DeployState.Builder builder = new DeployState.Builder()
            .applicationPackage(app)
            .zone(new Zone(Environment.prod, RegionName.from("foo")))
            .properties(new DeployProperties.Builder()
                                .hostedVespa(true) // the validator only applies to hosted deployments
                                .build());
    final DeployState deployState = builder.build(true);
    assertTrue("Test must emulate a hosted deployment.", deployState.isHosted());
    return deployState;
}
} | class SecretStoreValidatorTest {
@Rule
public final ExpectedException exceptionRule = ExpectedException.none();
private static String servicesXml() {
return joinLines("<services version='1.0'>",
" <container id='default' version='1.0'>",
" <secret-store>",
" <group name='group1' environment='prod'/>",
" </secret-store>",
" </container>",
"</services>");
}
private static String deploymentXml(boolean addAthenz) {
return joinLines("<deployment version='1.0' " + (addAthenz ?
"athenz-domain='domain' athenz-service='service'" : "") + ">",
" <prod />",
"</deployment>");
}
@Test
public void app_with_athenz_in_deployment_passes_validation() throws Exception {
DeployState deployState = deployState(servicesXml(), deploymentXml(true));
VespaModel model = new VespaModel(new NullConfigModelRegistry(), deployState);
new SecretStoreValidator().validate(model, deployState);
}
// FIX(review): the @Test annotation was duplicated ("@Test" on two consecutive lines),
// which does not compile — JUnit's @Test is not @Repeatable. Keep a single occurrence.
/** An application without a secret store needs no Athenz declaration in deployment.xml. */
@Test
public void app_without_secret_store_passes_validation_without_athenz_in_deployment() throws Exception {
    String servicesXml = joinLines("<services version='1.0'>",
                                   "  <container id='default' version='1.0' />",
                                   "</services>");
    DeployState deployState = deployState(servicesXml, deploymentXml(false));
    VespaModel model = new VespaModel(new NullConfigModelRegistry(), deployState);
    new SecretStoreValidator().validate(model, deployState);
}
private static DeployState deployState(String servicesXml, String deploymentXml) {
ApplicationPackage app = new MockApplicationPackage.Builder()
.withServices(servicesXml)
.withDeploymentSpec(deploymentXml)
.build();
DeployState.Builder builder = new DeployState.Builder()
.applicationPackage(app)
.zone(new Zone(Environment.prod, RegionName.from("foo")))
.properties(new DeployProperties.Builder()
.hostedVespa(true)
.build());
final DeployState deployState = builder.build(true);
assertTrue("Test must emulate a hosted deployment.", deployState.isHosted());
return deployState;
}
} |
At least indent the guarded statement if you're not going to use `{}`. 😞 | public boolean trigger(Job job) {
log.log(LogLevel.INFO, String.format("Attempting to trigger %s for %s, deploying %s: %s (platform: %s, application: %s)", job.jobType, job.id, job.change, job.reason, job.platformVersion, job.applicationVersion.id()));
try {
buildService.trigger(new BuildService.BuildJob(job.projectId, job.jobType.jobName()));
applications().lockOrThrow(job.id, application -> applications().store(application.withJobTriggering(
job.jobType, new JobStatus.JobRun(-1, job.platformVersion, job.applicationVersion, job.reason, clock.instant()))));
return true;
}
catch (RuntimeException e) {
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
applications().lockOrThrow(job.id, application -> applications().store(application.withProjectId(Optional.empty())));
log.log(LogLevel.WARNING, String.format("Exception triggering %s for %s (%s): %s", job.jobType, job.id, job.projectId, e));
return false;
}
} | applications().lockOrThrow(job.id, application -> applications().store(application.withProjectId(Optional.empty()))); | public boolean trigger(Job job) {
log.log(LogLevel.INFO, String.format("Attempting to trigger %s for %s, deploying %s: %s (platform: %s, application: %s)", job.jobType, job.id, job.change, job.reason, job.platformVersion, job.applicationVersion.id()));
try {
buildService.trigger(new BuildService.BuildJob(job.projectId, job.jobType.jobName()));
applications().lockOrThrow(job.id, application -> applications().store(application.withJobTriggering(
job.jobType, new JobStatus.JobRun(-1, job.platformVersion, job.applicationVersion, job.reason, clock.instant()))));
return true;
}
catch (RuntimeException e) {
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
applications().lockOrThrow(job.id, application -> applications().store(application.withProjectId(Optional.empty())));
log.log(LogLevel.WARNING, String.format("Exception triggering %s for %s (%s): %s", job.jobType, job.id, job.projectId, e));
return false;
}
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
/**
 * Creates a deployment trigger.
 *
 * NOTE(review): {@code curator} is null-checked but never retained, and {@code buildService}
 * is not null-checked — confirm whether the curator parameter is still needed.
 */
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
    Objects.requireNonNull(controller, "controller cannot be null");
    Objects.requireNonNull(curator, "curator cannot be null");
    Objects.requireNonNull(clock, "clock cannot be null");
    this.controller = controller;
    this.clock = clock;
    this.order = new DeploymentOrder(controller::system);
    this.buildService = buildService;
}
/** Returns the deployment order derived from this system. */
public DeploymentOrder deploymentOrder() {
    return order;
}
/**
 * Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
 */
public void notifyOfCompletion(JobReport report) {
    log.log(LogLevel.INFO, String.format("Got notified of %s for %s of %s (%d).",
                                         report.jobError().map(JobError::toString).orElse("success"),
                                         report.jobType(),
                                         report.applicationId(),
                                         report.projectId()));
    if ( ! applications().get(report.applicationId()).isPresent()) {
        log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                  "': Unknown application '" + report.applicationId() + "'");
        return;
    }
    applications().lockOrThrow(report.applicationId(), application -> {
        // Source revision may be absent (e.g. for non-component jobs); fall back to unknown
        ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
                                                      .orElse(ApplicationVersion.unknown);
        application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
        application = application.withProjectId(Optional.of(report.projectId()));
        // A successful component build produces a new application version: either start deploying it
        // now, or park it as outstanding until the current change completes.
        if (report.jobType() == JobType.component && report.success()) {
            if (acceptNewApplicationVersion(application))
                application = application.withChange(application.change().with(applicationVersion));
            else
                application = application.withOutstandingChange(Change.of(applicationVersion));
        }
        applications().store(application);
    });
}
/**
 * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
 *
 * Only one job is triggered each run for test jobs, since their environments have limited capacity.
 */
public long triggerReadyJobs() {
    // Partition into test jobs (capacity-limited, grouped by job type, at most one each)
    // and production jobs (grouped per application, no limit).
    return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
                             .entrySet().stream()
                             .flatMap(entry -> (entry.getKey()
                                     // Test jobs: retries first, then application upgrades, then oldest
                                     ? entry.getValue().stream()
                                            .sorted(comparing(Job::isRetry)
                                                            .thenComparing(Job::applicationUpgrade)
                                                            .reversed()
                                                            .thenComparing(Job::availableSince))
                                            .collect(groupingBy(Job::jobType))
                                     : entry.getValue().stream()
                                            .collect(groupingBy(Job::id)))
                                     .values().stream()
                                     // Defer the actual triggering into suppliers so it can run in parallel below
                                     .map(jobs -> (Supplier<Long>) jobs.stream()
                                             .filter(job -> canTrigger(job) && trigger(job))
                                             .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                             .parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
 * Attempts to trigger the given job for the given application and returns the outcome.
 *
 * If the build service can not find the given job, or claims it is illegal to trigger it,
 * the project id is removed from the application owning the job, to prevent further trigger attempts.
 *
 * NOTE(review): this javadoc documents trigger(Job), which is defined elsewhere in this class —
 * it appears detached from its method here.
 */
/**
 * Triggers a change of this application
 *
 * @param applicationId the application to trigger
 * @throws IllegalArgumentException if this application already has an ongoing change
 */
public void triggerChange(ApplicationId applicationId, Change change) {
    applications().lockOrThrow(applicationId, application -> {
        // A new change may only replace a failing one, never a healthy in-flight one
        if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
            throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                               application.change() + " is already in progress");
        application = application.withChange(change);
        // Starting an application change consumes any outstanding (parked) application version
        if (change.application().isPresent())
            application = application.withOutstandingChange(Change.empty());
        applications().store(application);
    });
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
    applications().lockOrThrow(applicationId, application -> {
        applications().store(application.withChange(application.change().application()
                                                               .map(Change::of)
                                                               .filter(change -> keepApplicationChange)
                                                               .orElse(Change.empty())));
    });
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps, sorted by job. */
public Stream<Job> computeReadyJobs() {
    return ApplicationList.from(applications().asList())
                          .notPullRequest()
                          .withProjectId()
                          .deploying()
                          .idList().stream()
                          .map(this::computeReadyJobs)
                          .flatMap(List::stream);
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
public boolean isRunning(Application application, JobType jobType) {
    // NOTE(review): lastTriggered().get() assumes a triggered run exists whenever lastCompleted does —
    // confirm that invariant holds for all job statuses.
    return ! application.deploymentJobs().statusOf(jobType)
                        .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))).orElse(false)
           && buildService.isRunning(new BuildService.BuildJob(application.deploymentJobs().projectId().get(), jobType.jobName()));
}
/** Creates a job for a manually forced deployment, available immediately and with no concurrency constraints. */
public Job forcedDeploymentJob(Application application, JobType jobType, String reason) {
    return deploymentJob(application, jobType, reason, clock.instant(), Collections.emptySet());
}
// Builds a Job for the given job type, selecting the platform and application versions to deploy:
// keep the currently deployed version when the change does not upgrade it, otherwise take the
// version from the change, falling back to the oldest deployed version / system version.
private Job deploymentJob(Application application, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
    boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
                                 .filter(JobError.outOfCapacity::equals).isPresent();
    if (isRetry) reason += "; retrying on out of capacity";
    Change change = application.change();
    Version platform = jobType == JobType.component
            ? Version.emptyVersion
            : deploymentFor(application, jobType).map(Deployment::version)
                                                 .filter(version -> ! change.upgrades(version))
                                                 .orElse(change.platform()
                                                               .orElse(application.oldestDeployedPlatform()
                                                                                  .orElse(controller.systemVersion())));
    ApplicationVersion applicationVersion = jobType == JobType.component
            ? ApplicationVersion.unknown
            : deploymentFor(application, jobType).map(Deployment::applicationVersion)
                                                 .filter(version -> ! change.upgrades(version))
                                                 .orElse(change.application()
                                                               .orElseGet(() -> application.oldestDeployedApplication()
                                                                                           .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version to use for " + jobType))));
    return new Job(application, jobType, reason, availableSince, concurrentlyWith, isRetry, change, platform, applicationVersion);
}
/**
 * Finds the next step to trigger for the given application, if any, and returns these as a list.
 */
private List<Job> computeReadyJobs(ApplicationId id) {
    List<Job> jobs = new ArrayList<>();
    applications().lockIfPresent(id, application -> {
        // An empty deployment spec implies a single system-test step
        List<DeploymentSpec.Step> steps = application.deploymentSpec().steps().isEmpty()
                ? Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test))
                : application.deploymentSpec().steps();
        // completedAt holds the completion instant of the previous step while it remains complete;
        // it becomes empty once an incomplete step is found.
        Optional<Instant> completedAt = Optional.of(clock.instant());
        String reason = "Deploying " + application.change();
        for (DeploymentSpec.Step step : steps) {
            Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
            Set<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(application.change(), application, job).isPresent()).collect(toSet());
            if (remainingJobs.isEmpty()) { // Step is complete — advance the completion point
                if (stepJobs.isEmpty()) { // An empty zone set means this step is a delay
                    Duration delay = ((DeploymentSpec.Delay) step).duration();
                    completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                    reason += " after a delay of " + delay;
                }
                else {
                    completedAt = stepJobs.stream().map(job -> completedAt(application.change(), application, job).get()).max(naturalOrder());
                    reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                }
            }
            else if (completedAt.isPresent()) { // Previous step done — trigger what remains of this one
                for (JobType job : remainingJobs)
                    jobs.add(deploymentJob(application, job, reason, completedAt.get(), stepJobs));
                completedAt = Optional.empty();
                break;
            }
        }
        // All steps complete: the change is done
        if (completedAt.isPresent())
            applications().store(application.withChange(Change.empty()));
    });
    return jobs;
}
/**
 * Returns the instant when the given change is complete for the given application for the given job.
 *
 * Any job is complete if the given change is already successful on that job.
 * A production job is also considered complete if its current change is strictly dominated by what
 * is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
 * part is a downgrade, regardless of the status of the job.
 */
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
    Optional<Instant> lastSuccess = application.deploymentJobs().successAt(change, jobType);
    if (lastSuccess.isPresent() || ! jobType.isProduction())
        return lastSuccess;
    return deploymentFor(application, jobType)
            .filter(deployment -> ! ( change.upgrades(deployment.version())
                                      || change.upgrades(deployment.applicationVersion()))
                                  && ( change.downgrades(deployment.version())
                                       || change.downgrades(deployment.applicationVersion())))
            .map(Deployment::at);
}
// Returns whether the given job may be triggered right now: the change must be deployable to
// the job's environment, the job must not already be running, and production jobs additionally
// require no conflicting concurrent jobs and an active change window.
private boolean canTrigger(Job job) {
    Application application = applications().require(job.id);
    if ( ! application.deploymentJobs().isDeployableTo(job.jobType.environment(), application.change()))
        return false;
    if (isRunning(application, job.jobType))
        return false;
    if ( ! job.jobType.isProduction())
        return true;
    // Only run concurrently with the jobs this job was declared alongside
    if ( ! job.concurrentlyWith.containsAll(runningProductionJobsFor(application)))
        return false;
    // Respect the application's block windows
    if ( ! application.changeAt(clock.instant()).isPresent())
        return false;
    return true;
}
/** Returns the production jobs of this application which are currently running. */
private List<JobType> runningProductionJobsFor(Application application) {
    return application.deploymentJobs().jobStatus().keySet().parallelStream()
                      .filter(job -> job.isProduction())
                      .filter(job -> isRunning(application, job))
                      .collect(Collectors.toList());
}
/** Convenience accessor for the application controller. */
private ApplicationController applications() {
    return controller.applications();
}
// A new application version is taken immediately when an application change is already rolling,
// when jobs are failing (so the fix can roll out), or when no platform change is in progress.
private boolean acceptNewApplicationVersion(LockedApplication application) {
    if (application.change().application().isPresent()) return true;
    if (application.deploymentJobs().hasFailures()) return true;
    return ! application.changeAt(clock.instant()).platform().isPresent();
}
/** Returns the deployment of this application in the zone of the given production job, if any. */
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
    return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
/** An immutable description of a job that is ready to be triggered, and why. */
public static class Job {
    private final ApplicationId id;
    private final JobType jobType;
    private final long projectId;
    private final String reason;
    // When this job became available, i.e. when its upstream step completed
    private final Instant availableSince;
    // Jobs of the same step, which this job may run concurrently with
    private final Collection<JobType> concurrentlyWith;
    private final boolean isRetry;
    private final boolean isApplicationUpgrade;
    private final Change change;
    private final Version platformVersion;
    private final ApplicationVersion applicationVersion;
    private Job(Application application, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry, Change change, Version platformVersion, ApplicationVersion applicationVersion) {
        this.id = application.id();
        this.jobType = jobType;
        // NOTE(review): assumes a project id is always present for jobs that reach this point — confirm
        this.projectId = application.deploymentJobs().projectId().get();
        this.availableSince = availableSince;
        this.concurrentlyWith = concurrentlyWith;
        this.reason = reason;
        this.isRetry = isRetry;
        this.isApplicationUpgrade = change.application().isPresent();
        this.change = change;
        this.platformVersion = platformVersion;
        this.applicationVersion = applicationVersion;
    }
    public ApplicationId id() { return id; }
    public JobType jobType() { return jobType; }
    public long projectId() { return projectId; }
    public String reason() { return reason; }
    public Instant availableSince() { return availableSince; }
    public boolean isRetry() { return isRetry; }
    public boolean applicationUpgrade() { return isApplicationUpgrade; }
    public Change change() { return change; }
    public Version platform() { return platformVersion; }
    public ApplicationVersion application() { return applicationVersion; }
}
} | class DeploymentTrigger {
// Logger for trigger decisions and outcomes.
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock; // injected clock, so trigger timing is testable
private final DeploymentOrder order;
private final BuildService buildService; // the external service which actually starts jobs
/**
 * Creates a deployment trigger.
 *
 * @param controller   the controller owning application state; must be non-null
 * @param curator      required non-null for wiring consistency, though not stored here
 * @param buildService the external service used to start jobs; must be non-null
 * @param clock        the clock used for all trigger timing decisions; must be non-null
 */
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
    Objects.requireNonNull(controller, "controller cannot be null");
    Objects.requireNonNull(curator, "curator cannot be null");
    Objects.requireNonNull(clock, "clock cannot be null");
    this.controller = controller;
    this.clock = clock;
    this.order = new DeploymentOrder(controller::system);
    // buildService was the only injected dependency not null-checked; fail fast here too.
    this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null");
}
/** Returns the deployment order helper used to map declared zones to jobs for this system. */
public DeploymentOrder deploymentOrder() {
return order;
}
/**
 * Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
 */
public void notifyOfCompletion(JobReport report) {
log.log(LogLevel.INFO, String.format("Got notified of %s for %s of %s (%d).",
report.jobError().map(JobError::toString).orElse("success"),
report.jobType(),
report.applicationId(),
report.projectId()));
// Reports for applications this controller does not know are logged and dropped.
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
// Derive the application version from the reported source revision; unknown when absent.
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(Optional.of(report.projectId()));
// A successful component (build) job yields a new application version: either start rolling
// it out as the current change, or park it as outstanding (see acceptNewApplicationVersion).
if (report.jobType() == JobType.component && report.success()) {
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion));
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
applications().store(application);
});
}
/**
 * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
 *
 * Only one job is triggered each run for test jobs, since their environments have limited capacity.
 */
public long triggerReadyJobs() {
// Partition ready jobs into test jobs (key true) and all others (key false).
return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
.entrySet().stream()
.flatMap(entry -> (entry.getKey()
// Test jobs: retries and application upgrades first (note the reversed()), then by how
// long they have been ready; grouped per job type so each shared test environment is
// triggered at most once per run (see the limit below).
? entry.getValue().stream()
.sorted(comparing(Job::isRetry)
.thenComparing(Job::applicationUpgrade)
.reversed()
.thenComparing(Job::availableSince))
.collect(groupingBy(Job::jobType))
// Other jobs: grouped per application, with no per-group limit below.
: entry.getValue().stream()
.collect(groupingBy(Job::id)))
.values().stream()
// Wrap each group's triggering in a Supplier so the groups can run in parallel below.
.map(jobs -> (Supplier<Long>) jobs.stream()
.filter(job -> canTrigger(job) && trigger(job))
.limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
.parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
* Attempts to trigger the given job for the given application and returns the outcome.
*
* If the build service can not find the given job, or claims it is illegal to trigger it,
* the project id is removed from the application owning the job, to prevent further trigger attemps.
*/
/**
 * Triggers a change of this application
 *
 * @param applicationId the application to trigger
 * @throws IllegalArgumentException if this application already has an ongoing change
 */
public void triggerChange(ApplicationId applicationId, Change change) {
    applications().lockOrThrow(applicationId, locked -> {
        // Refuse to start a new change while another is in progress and not failing.
        boolean busy = locked.change().isPresent() && ! locked.deploymentJobs().hasFailures();
        if (busy)
            throw new IllegalArgumentException("Could not start " + change + " on " + locked + ": " +
                                               locked.change() + " is already in progress");
        locked = locked.withChange(change);
        // Starting an application change consumes any outstanding one.
        if (change.application().isPresent())
            locked = locked.withOutstandingChange(Change.empty());
        applications().store(locked);
    });
}
/** Cancels the platform-upgrade part of the given application's change; the application-version part is kept only if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
    applications().lockOrThrow(applicationId, application -> {
        // Retain only the application part of the current change, and only when asked to.
        Change remaining = keepApplicationChange
                ? application.change().application().map(Change::of).orElse(Change.empty())
                : Change.empty();
        applications().store(application.withChange(remaining));
    });
}
/** Returns a stream of all jobs which have changes to propagate from the upstream steps. */
public Stream<Job> computeReadyJobs() {
    // Consider only real (non-PR) applications with a project id and an active change.
    return ApplicationList.from(applications().asList())
                          .notPullRequest()
                          .withProjectId()
                          .deploying()
                          .idList().stream()
                          .flatMap(id -> computeReadyJobs(id).stream());
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
public boolean isRunning(Application application, JobType jobType) {
// A job is considered not running when its last completion is after its last triggering.
// NOTE(review): lastTriggered().get() is unchecked — assumes a lastCompleted entry implies
// a lastTriggered entry; confirm JobStatus maintains that invariant.
return ! application.deploymentJobs().statusOf(jobType)
.flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))).orElse(false)
&& buildService.isRunning(new BuildService.BuildJob(application.deploymentJobs().projectId().get(), jobType.jobName()));
}
/** Returns a job for the given application and type which is available from now, and which lists no jobs it may run concurrently with. */
public Job forcedDeploymentJob(Application application, JobType jobType, String reason) {
return deploymentJob(application, jobType, reason, clock.instant(), Collections.emptySet());
}
/**
 * Creates a Job describing a triggering of the given job type for the given application,
 * choosing the platform and application versions to deploy.
 */
private Job deploymentJob(Application application, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
// A previous failure due to capacity shortage is retried, and noted in the reason.
boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
.filter(JobError.outOfCapacity::equals).isPresent();
if (isRetry) reason += "; retrying on out of capacity";
Change change = application.change();
// Platform: component jobs use a placeholder; otherwise keep the deployed version unless the
// change upgrades it, falling back to the change's platform, the oldest deployed platform,
// and finally the system version.
Version platform = jobType == JobType.component
? Version.emptyVersion
: deploymentFor(application, jobType).map(Deployment::version)
.filter(version -> ! change.upgrades(version))
.orElse(change.platform()
.orElse(application.oldestDeployedPlatform()
.orElse(controller.systemVersion())));
// Application version: analogous fallback chain, but with no global default — fails if no
// version can be determined at all.
ApplicationVersion applicationVersion = jobType == JobType.component
? ApplicationVersion.unknown
: deploymentFor(application, jobType).map(Deployment::applicationVersion)
.filter(version -> ! change.upgrades(version))
.orElse(change.application()
.orElseGet(() -> application.oldestDeployedApplication()
.orElseThrow(() -> new IllegalArgumentException("Cannot determine application version to use for " + jobType))));
return new Job(application, jobType, reason, availableSince, concurrentlyWith, isRetry, change, platform, applicationVersion);
}
/**
 * Finds the next step to trigger for the given application, if any, and returns these as a list.
 */
private List<Job> computeReadyJobs(ApplicationId id) {
List<Job> jobs = new ArrayList<>();
applications().lockIfPresent(id, application -> {
// An empty deployment spec implicitly means a single test step.
List<DeploymentSpec.Step> steps = application.deploymentSpec().steps().isEmpty()
? Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test))
: application.deploymentSpec().steps();
// completedAt carries the completion instant of the latest fully completed step,
// and becomes empty once an incomplete (or still-delayed) step is found.
Optional<Instant> completedAt = Optional.of(clock.instant());
String reason = "Deploying " + application.change();
for (DeploymentSpec.Step step : steps) {
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Set<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(application.change(), application, job).isPresent()).collect(toSet());
if (remainingJobs.isEmpty()) {
if (stepJobs.isEmpty()) {
// A jobless step is a delay: push the completion time forward, clearing it
// if the delay has not yet passed.
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
// All of this step's jobs are done: the step completed when the last of them did.
completedAt = stepJobs.stream().map(job -> completedAt(application.change(), application, job).get()).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
else if (completedAt.isPresent()) {
// The previous step is complete, so the remaining jobs of this step are ready now.
for (JobType job : remainingJobs)
jobs.add(deploymentJob(application, job, reason, completedAt.get(), stepJobs));
completedAt = Optional.empty();
break;
}
}
// Every step completed: the current change is done.
if (completedAt.isPresent())
applications().store(application.withChange(Change.empty()));
});
return jobs;
}
/**
 * Returns the instant when the given change is complete for the given application for the given job.
 *
 * Any job is complete if the given change is already successful on that job.
 * A production job is also considered complete if its current change is strictly dominated by what
 * is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
 * part is a downgrade, regardless of the status of the job.
 */
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
Optional<Instant> lastSuccess = application.deploymentJobs().successAt(change, jobType);
if (lastSuccess.isPresent() || ! jobType.isProduction())
return lastSuccess;
// No successful run, but a production deployment which strictly dominates the change
// (nothing to upgrade, something to downgrade) also counts as complete, as of its time.
return deploymentFor(application, jobType)
.filter(deployment -> ! ( change.upgrades(deployment.version())
|| change.upgrades(deployment.applicationVersion()))
&& ( change.downgrades(deployment.version())
|| change.downgrades(deployment.applicationVersion())))
.map(Deployment::at);
}
private boolean canTrigger(Job job) {
Application application = applications().require(job.id);
if ( ! application.deploymentJobs().isDeployableTo(job.jobType.environment(), application.change()))
return false;
if (isRunning(application, job.jobType))
return false;
if ( ! job.jobType.isProduction())
return true;
if ( ! job.concurrentlyWith.containsAll(runningProductionJobsFor(application)))
return false;
if ( ! application.changeAt(clock.instant()).isPresent())
return false;
return true;
}
private List<JobType> runningProductionJobsFor(Application application) {
return application.deploymentJobs().jobStatus().keySet().parallelStream()
.filter(job -> job.isProduction())
.filter(job -> isRunning(application, job))
.collect(Collectors.toList());
}
private ApplicationController applications() {
return controller.applications();
}
private boolean acceptNewApplicationVersion(LockedApplication application) {
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
return ! application.changeAt(clock.instant()).platform().isPresent();
}
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
public static class Job {
private final ApplicationId id;
private final JobType jobType;
private final long projectId;
private final String reason;
private final Instant availableSince;
private final Collection<JobType> concurrentlyWith;
private final boolean isRetry;
private final boolean isApplicationUpgrade;
private final Change change;
private final Version platformVersion;
private final ApplicationVersion applicationVersion;
private Job(Application application, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry, Change change, Version platformVersion, ApplicationVersion applicationVersion) {
this.id = application.id();
this.jobType = jobType;
this.projectId = application.deploymentJobs().projectId().get();
this.availableSince = availableSince;
this.concurrentlyWith = concurrentlyWith;
this.reason = reason;
this.isRetry = isRetry;
this.isApplicationUpgrade = change.application().isPresent();
this.change = change;
this.platformVersion = platformVersion;
this.applicationVersion = applicationVersion;
}
public ApplicationId id() { return id; }
public JobType jobType() { return jobType; }
public long projectId() { return projectId; }
public String reason() { return reason; }
public Instant availableSince() { return availableSince; }
public boolean isRetry() { return isRetry; }
public boolean applicationUpgrade() { return isApplicationUpgrade; }
public Change change() { return change; }
public Version platform() { return platformVersion; }
public ApplicationVersion application() { return applicationVersion; }
}
} |
Wat. Yes. | public boolean trigger(Job job) {
log.log(LogLevel.INFO, String.format("Attempting to trigger %s for %s, deploying %s: %s (platform: %s, application: %s)", job.jobType, job.id, job.change, job.reason, job.platformVersion, job.applicationVersion.id()));
try {
buildService.trigger(new BuildService.BuildJob(job.projectId, job.jobType.jobName()));
applications().lockOrThrow(job.id, application -> applications().store(application.withJobTriggering(
job.jobType, new JobStatus.JobRun(-1, job.platformVersion, job.applicationVersion, job.reason, clock.instant()))));
return true;
}
catch (RuntimeException e) {
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
applications().lockOrThrow(job.id, application -> applications().store(application.withProjectId(Optional.empty())));
log.log(LogLevel.WARNING, String.format("Exception triggering %s for %s (%s): %s", job.jobType, job.id, job.projectId, e));
return false;
}
} | applications().lockOrThrow(job.id, application -> applications().store(application.withProjectId(Optional.empty()))); | public boolean trigger(Job job) {
log.log(LogLevel.INFO, String.format("Attempting to trigger %s for %s, deploying %s: %s (platform: %s, application: %s)", job.jobType, job.id, job.change, job.reason, job.platformVersion, job.applicationVersion.id()));
try {
buildService.trigger(new BuildService.BuildJob(job.projectId, job.jobType.jobName()));
applications().lockOrThrow(job.id, application -> applications().store(application.withJobTriggering(
job.jobType, new JobStatus.JobRun(-1, job.platformVersion, job.applicationVersion, job.reason, clock.instant()))));
return true;
}
catch (RuntimeException e) {
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
applications().lockOrThrow(job.id, application -> applications().store(application.withProjectId(Optional.empty())));
log.log(LogLevel.WARNING, String.format("Exception triggering %s for %s (%s): %s", job.jobType, job.id, job.projectId, e));
return false;
}
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.order = new DeploymentOrder(controller::system);
this.buildService = buildService;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*/
public void notifyOfCompletion(JobReport report) {
log.log(LogLevel.INFO, String.format("Got notified of %s for %s of %s (%d).",
report.jobError().map(JobError::toString).orElse("success"),
report.jobType(),
report.applicationId(),
report.projectId()));
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(Optional.of(report.projectId()));
if (report.jobType() == JobType.component && report.success()) {
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion));
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
applications().store(application);
});
}
/**
* Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
*
* Only one job is triggered each run for test jobs, since their environments have limited capacity.
*/
public long triggerReadyJobs() {
return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
.entrySet().stream()
.flatMap(entry -> (entry.getKey()
? entry.getValue().stream()
.sorted(comparing(Job::isRetry)
.thenComparing(Job::applicationUpgrade)
.reversed()
.thenComparing(Job::availableSince))
.collect(groupingBy(Job::jobType))
: entry.getValue().stream()
.collect(groupingBy(Job::id)))
.values().stream()
.map(jobs -> (Supplier<Long>) jobs.stream()
.filter(job -> canTrigger(job) && trigger(job))
.limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
.parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
* Attempts to trigger the given job for the given application and returns the outcome.
*
* If the build service can not find the given job, or claims it is illegal to trigger it,
* the project id is removed from the application owning the job, to prevent further trigger attemps.
*/
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already has an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
});
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps, sorted by job. */
public Stream<Job> computeReadyJobs() {
return ApplicationList.from(applications().asList())
.notPullRequest()
.withProjectId()
.deploying()
.idList().stream()
.map(this::computeReadyJobs)
.flatMap(List::stream);
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service othewise. */
public boolean isRunning(Application application, JobType jobType) {
return ! application.deploymentJobs().statusOf(jobType)
.flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))).orElse(false)
&& buildService.isRunning(new BuildService.BuildJob(application.deploymentJobs().projectId().get(), jobType.jobName()));
}
public Job forcedDeploymentJob(Application application, JobType jobType, String reason) {
return deploymentJob(application, jobType, reason, clock.instant(), Collections.emptySet());
}
private Job deploymentJob(Application application, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
.filter(JobError.outOfCapacity::equals).isPresent();
if (isRetry) reason += "; retrying on out of capacity";
Change change = application.change();
Version platform = jobType == JobType.component
? Version.emptyVersion
: deploymentFor(application, jobType).map(Deployment::version)
.filter(version -> ! change.upgrades(version))
.orElse(change.platform()
.orElse(application.oldestDeployedPlatform()
.orElse(controller.systemVersion())));
ApplicationVersion applicationVersion = jobType == JobType.component
? ApplicationVersion.unknown
: deploymentFor(application, jobType).map(Deployment::applicationVersion)
.filter(version -> ! change.upgrades(version))
.orElse(change.application()
.orElseGet(() -> application.oldestDeployedApplication()
.orElseThrow(() -> new IllegalArgumentException("Cannot determine application version to use for " + jobType))));
return new Job(application, jobType, reason, availableSince, concurrentlyWith, isRetry, change, platform, applicationVersion);
}
/**
* Finds the next step to trigger for the given application, if any, and returns these as a list.
*/
private List<Job> computeReadyJobs(ApplicationId id) {
List<Job> jobs = new ArrayList<>();
applications().lockIfPresent(id, application -> {
List<DeploymentSpec.Step> steps = application.deploymentSpec().steps().isEmpty()
? Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test))
: application.deploymentSpec().steps();
Optional<Instant> completedAt = Optional.of(clock.instant());
String reason = "Deploying " + application.change();
for (DeploymentSpec.Step step : steps) {
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Set<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(application.change(), application, job).isPresent()).collect(toSet());
if (remainingJobs.isEmpty()) {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = stepJobs.stream().map(job -> completedAt(application.change(), application, job).get()).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
else if (completedAt.isPresent()) {
for (JobType job : remainingJobs)
jobs.add(deploymentJob(application, job, reason, completedAt.get(), stepJobs));
completedAt = Optional.empty();
break;
}
}
if (completedAt.isPresent())
applications().store(application.withChange(Change.empty()));
});
return jobs;
}
/**
* Returns the instant when the given change is complete for the given application for the given job.
*
* Any job is complete if the given change is already successful on that job.
* A production job is also considered complete if its current change is strictly dominated by what
* is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
* part is a downgrade, regardless of the status of the job.
*/
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
Optional<Instant> lastSuccess = application.deploymentJobs().successAt(change, jobType);
if (lastSuccess.isPresent() || ! jobType.isProduction())
return lastSuccess;
return deploymentFor(application, jobType)
.filter(deployment -> ! ( change.upgrades(deployment.version())
|| change.upgrades(deployment.applicationVersion()))
&& ( change.downgrades(deployment.version())
|| change.downgrades(deployment.applicationVersion())))
.map(Deployment::at);
}
private boolean canTrigger(Job job) {
Application application = applications().require(job.id);
if ( ! application.deploymentJobs().isDeployableTo(job.jobType.environment(), application.change()))
return false;
if (isRunning(application, job.jobType))
return false;
if ( ! job.jobType.isProduction())
return true;
if ( ! job.concurrentlyWith.containsAll(runningProductionJobsFor(application)))
return false;
if ( ! application.changeAt(clock.instant()).isPresent())
return false;
return true;
}
private List<JobType> runningProductionJobsFor(Application application) {
return application.deploymentJobs().jobStatus().keySet().parallelStream()
.filter(job -> job.isProduction())
.filter(job -> isRunning(application, job))
.collect(Collectors.toList());
}
private ApplicationController applications() {
return controller.applications();
}
private boolean acceptNewApplicationVersion(LockedApplication application) {
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
return ! application.changeAt(clock.instant()).platform().isPresent();
}
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
public static class Job {
private final ApplicationId id;
private final JobType jobType;
private final long projectId;
private final String reason;
private final Instant availableSince;
private final Collection<JobType> concurrentlyWith;
private final boolean isRetry;
private final boolean isApplicationUpgrade;
private final Change change;
private final Version platformVersion;
private final ApplicationVersion applicationVersion;
private Job(Application application, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry, Change change, Version platformVersion, ApplicationVersion applicationVersion) {
this.id = application.id();
this.jobType = jobType;
this.projectId = application.deploymentJobs().projectId().get();
this.availableSince = availableSince;
this.concurrentlyWith = concurrentlyWith;
this.reason = reason;
this.isRetry = isRetry;
this.isApplicationUpgrade = change.application().isPresent();
this.change = change;
this.platformVersion = platformVersion;
this.applicationVersion = applicationVersion;
}
public ApplicationId id() { return id; }
public JobType jobType() { return jobType; }
public long projectId() { return projectId; }
public String reason() { return reason; }
public Instant availableSince() { return availableSince; }
public boolean isRetry() { return isRetry; }
public boolean applicationUpgrade() { return isApplicationUpgrade; }
public Change change() { return change; }
public Version platform() { return platformVersion; }
public ApplicationVersion application() { return applicationVersion; }
}
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.order = new DeploymentOrder(controller::system);
this.buildService = buildService;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*/
public void notifyOfCompletion(JobReport report) {
log.log(LogLevel.INFO, String.format("Got notified of %s for %s of %s (%d).",
report.jobError().map(JobError::toString).orElse("success"),
report.jobType(),
report.applicationId(),
report.projectId()));
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(Optional.of(report.projectId()));
if (report.jobType() == JobType.component && report.success()) {
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion));
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
applications().store(application);
});
}
/**
* Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
*
* Only one job is triggered each run for test jobs, since their environments have limited capacity.
*/
public long triggerReadyJobs() {
return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
.entrySet().stream()
.flatMap(entry -> (entry.getKey()
? entry.getValue().stream()
.sorted(comparing(Job::isRetry)
.thenComparing(Job::applicationUpgrade)
.reversed()
.thenComparing(Job::availableSince))
.collect(groupingBy(Job::jobType))
: entry.getValue().stream()
.collect(groupingBy(Job::id)))
.values().stream()
.map(jobs -> (Supplier<Long>) jobs.stream()
.filter(job -> canTrigger(job) && trigger(job))
.limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
.parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
* Attempts to trigger the given job for the given application and returns the outcome.
*
* If the build service can not find the given job, or claims it is illegal to trigger it,
* the project id is removed from the application owning the job, to prevent further trigger attemps.
*/
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already has an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
});
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps, sorted by job. */
public Stream<Job> computeReadyJobs() {
    ApplicationList candidates = ApplicationList.from(applications().asList())
                                                .notPullRequest()
                                                .withProjectId()
                                                .deploying();
    return candidates.idList().stream()
                     .map(this::computeReadyJobs)
                     .flatMap(Collection::stream);
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
public boolean isRunning(Application application, JobType jobType) {
    // A run which completed strictly after its trigger time means the job is done locally;
    // otherwise, the build service gives the authoritative answer.
    // NOTE(review): lastTriggered().get() assumes a completed run implies a recorded trigger — confirm this invariant in JobStatus.
    return ! application.deploymentJobs().statusOf(jobType)
                        .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))).orElse(false)
           && buildService.isRunning(new BuildService.BuildJob(application.deploymentJobs().projectId().get(), jobType.jobName()));
}
/**
 * Creates a job for the given application and job type, available from right now,
 * and with an empty set of jobs it is allowed to run concurrently with.
 */
public Job forcedDeploymentJob(Application application, JobType jobType, String reason) {
    return deploymentJob(application, jobType, reason, clock.instant(), Collections.emptySet());
}
/**
 * Creates a Job for the given application and job type, made available at the given instant.
 *
 * For non-component jobs, the platform and application versions to deploy are the ones already
 * deployed in the job's zone when the pending change does not upgrade them, and the change's
 * versions otherwise; the component job leaves both unresolved.
 *
 * @param application the application to create a job for
 * @param jobType the type of job to create
 * @param reason a human-readable description of why the job is triggered
 * @param availableSince the instant at which this job became ready to run
 * @param concurrentlyWith production jobs this job is allowed to run concurrently with
 * @throws IllegalArgumentException if no application version can be determined for a non-component job
 */
private Job deploymentJob(Application application, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
    // A run which failed on out of capacity is retried immediately.
    boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
                                 .filter(JobError.outOfCapacity::equals).isPresent();
    if (isRetry) reason += "; retrying on out of capacity";
    Change change = application.change();
    // Keep the deployed platform when the change does not upgrade it; otherwise use the change's
    // platform, falling back to the oldest deployed platform, and finally the system version.
    Version platform = jobType == JobType.component
            ? Version.emptyVersion
            : deploymentFor(application, jobType).map(Deployment::version)
                                                 .filter(version -> ! change.upgrades(version))
                                                 .orElse(change.platform()
                                                                .orElse(application.oldestDeployedPlatform()
                                                                                   .orElse(controller.systemVersion())));
    // Same resolution for the application version, except there is no system-wide fallback.
    ApplicationVersion applicationVersion = jobType == JobType.component
            ? ApplicationVersion.unknown
            : deploymentFor(application, jobType).map(Deployment::applicationVersion)
                                                 .filter(version -> ! change.upgrades(version))
                                                 .orElse(change.application()
                                                                .orElseGet(() -> application.oldestDeployedApplication()
                                                                                            .orElseThrow(() -> new IllegalArgumentException("Cannot determine application version to use for " + jobType))));
    return new Job(application, jobType, reason, availableSince, concurrentlyWith, isRetry, change, platform, applicationVersion);
}
/**
 * Finds the next step to trigger for the given application, if any, and returns these as a list.
 */
private List<Job> computeReadyJobs(ApplicationId id) {
    List<Job> jobs = new ArrayList<>();
    applications().lockIfPresent(id, application -> {
        // An application without declared steps deploys to the test environment only.
        List<DeploymentSpec.Step> steps = application.deploymentSpec().steps().isEmpty()
                ? Collections.singletonList(new DeploymentSpec.DeclaredZone(Environment.test))
                : application.deploymentSpec().steps();
        // The instant at which all steps seen so far were complete, or empty when some step is not.
        Optional<Instant> completedAt = Optional.of(clock.instant());
        String reason = "Deploying " + application.change();
        for (DeploymentSpec.Step step : steps) {
            Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
            Set<JobType> remainingJobs = stepJobs.stream().filter(job -> ! completedAt(application.change(), application, job).isPresent()).collect(toSet());
            if (remainingJobs.isEmpty()) { // The step is complete: advance the completion instant past it.
                if (stepJobs.isEmpty()) { // A step with no jobs is a delay step.
                    Duration delay = ((DeploymentSpec.Delay) step).duration();
                    // Empty out completedAt while the delay has not yet passed.
                    completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                    reason += " after a delay of " + delay;
                }
                else {
                    // The step completed when its last job did.
                    completedAt = stepJobs.stream().map(job -> completedAt(application.change(), application, job).get()).max(naturalOrder());
                    reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                }
            }
            else if (completedAt.isPresent()) { // All upstream steps are complete: trigger this step's remaining jobs.
                for (JobType job : remainingJobs)
                    jobs.add(deploymentJob(application, job, reason, completedAt.get(), stepJobs));
                completedAt = Optional.empty();
                break;
            }
        }
        // Every step completed: the change is done and can be removed.
        if (completedAt.isPresent())
            applications().store(application.withChange(Change.empty()));
    });
    return jobs;
}
/**
 * Returns the instant when the given change is complete for the given application for the given job.
 *
 * Any job is complete if the given change is already successful on that job.
 * A production job is also considered complete if its current change is strictly dominated by what
 * is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
 * part is a downgrade, regardless of the status of the job.
 */
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
    Optional<Instant> lastSuccess = application.deploymentJobs().successAt(change, jobType);
    if (lastSuccess.isPresent() || ! jobType.isProduction())
        return lastSuccess;
    // No success recorded: a production job is still complete if the deployed versions strictly
    // dominate the change — nothing in the change upgrades them, and something downgrades them.
    return deploymentFor(application, jobType)
            .filter(deployment ->    ! (   change.upgrades(deployment.version())
                                        || change.upgrades(deployment.applicationVersion()))
                                  &&   (   change.downgrades(deployment.version())
                                        || change.downgrades(deployment.applicationVersion())))
            .map(Deployment::at);
}
/** Returns whether the given job may be triggered right now, given the current state of its application. */
private boolean canTrigger(Job job) {
    Application application = applications().require(job.id);
    // The change must be deployable to the job's environment, and the job must not already be running.
    if (    ! application.deploymentJobs().isDeployableTo(job.jobType.environment(), application.change())
         || isRunning(application, job.jobType))
        return false;
    // Non-production jobs have no further constraints.
    if ( ! job.jobType.isProduction())
        return true;
    // Production jobs must also tolerate all running production jobs, and be inside a change window.
    return job.concurrentlyWith.containsAll(runningProductionJobsFor(application))
        && application.changeAt(clock.instant()).isPresent();
}
/** Returns the production jobs of the given application which are currently running. */
private List<JobType> runningProductionJobsFor(Application application) {
    return application.deploymentJobs().jobStatus().keySet().parallelStream()
                      .filter(JobType::isProduction)
                      .filter(jobType -> isRunning(application, jobType))
                      .collect(Collectors.toList());
}
/** Returns the application controller of this' controller. */
private ApplicationController applications() {
    return controller.applications();
}
/** Returns whether a new application change may be started for the given application right now. */
private boolean acceptNewApplicationVersion(LockedApplication application) {
    // Accept when an application change is already rolling out, when something is failing,
    // or when no platform upgrade is in progress right now.
    return application.change().application().isPresent()
        || application.deploymentJobs().hasFailures()
        || ! application.changeAt(clock.instant()).platform().isPresent();
}
/** Returns the deployment of the given application in the zone of the given job, if any. */
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
    // NOTE(review): zone(...).get() assumes the job type has a zone in this system — confirm
    // callers never pass a job type without one (e.g. the component job).
    return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
/** A deployment job for an application, with all details needed to trigger and order it resolved up front. */
public static class Job {

    private final ApplicationId id;
    private final JobType jobType;
    private final long projectId;
    private final String reason;                          // Human-readable explanation of why this job is triggered.
    private final Instant availableSince;                 // The instant at which the change became ready for this job.
    private final Collection<JobType> concurrentlyWith;   // Production jobs this job may run concurrently with.
    private final boolean isRetry;                        // Whether the last run failed on out of capacity.
    private final boolean isApplicationUpgrade;           // Whether the change carries an application version.
    private final Change change;
    private final Version platformVersion;                // The platform version to deploy.
    private final ApplicationVersion applicationVersion;  // The application version to deploy.

    /** Creates a job from the given, already resolved, details; constructed only by the enclosing class. */
    private Job(Application application, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry, Change change, Version platformVersion, ApplicationVersion applicationVersion) {
        this.id = application.id();
        this.jobType = jobType;
        // NOTE(review): assumes a project id is always present for applications reaching this point — confirm callers guarantee this.
        this.projectId = application.deploymentJobs().projectId().get();
        this.availableSince = availableSince;
        this.concurrentlyWith = concurrentlyWith;
        this.reason = reason;
        this.isRetry = isRetry;
        this.isApplicationUpgrade = change.application().isPresent();
        this.change = change;
        this.platformVersion = platformVersion;
        this.applicationVersion = applicationVersion;
    }

    public ApplicationId id() { return id; }
    public JobType jobType() { return jobType; }
    public long projectId() { return projectId; }
    public String reason() { return reason; }
    public Instant availableSince() { return availableSince; }
    public boolean isRetry() { return isRetry; }
    public boolean applicationUpgrade() { return isApplicationUpgrade; }
    public Change change() { return change; }
    public Version platform() { return platformVersion; }
    public ApplicationVersion application() { return applicationVersion; }

}
} |
Copy/paste error. I think you mean 'document-processing'. | public DocprocChains getDocprocChains() {
if (containerDocproc == null)
throw new IllegalStateException("Search components not found in container cluster '" + getSubId() +
"': Add <document-processing/> to the cluster in services.xml");
return containerDocproc.getChains();
} | throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + | public DocprocChains getDocprocChains() {
if (containerDocproc == null)
throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() +
"': Add <document-processing/> to the cluster in services.xml");
return containerDocproc.getChains();
} | class is not accessible
ProcessingHandler<?> processingHandler = new ProcessingHandler<>(
processingChains,
"com.yahoo.processing.handler.ProcessingHandler");
for (String binding: serverBindings)
processingHandler.addServerBindings(binding);
addComponent(processingHandler);
}
public ProcessingChains getProcessingChains() {
return processingChains;
} | class is not accessible
ProcessingHandler<?> processingHandler = new ProcessingHandler<>(
processingChains,
"com.yahoo.processing.handler.ProcessingHandler");
for (String binding: serverBindings)
processingHandler.addServerBindings(binding);
addComponent(processingHandler);
}
public ProcessingChains getProcessingChains() {
return processingChains;
} |
Again, you could use a helper to avoid the newlines. | public void testModelWithReferencedIndexingCluster() {
String services =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<services version=\"1.0\">\n" +
"\n" +
" <admin version=\"2.0\">\n" +
" <adminserver hostalias=\"vespa-1\"/>\n" +
" <configservers>\n" +
" <configserver hostalias=\"vespa-1\"/>\n" +
" </configservers>\n" +
" </admin>\n" +
"\n" +
" <container id=\"container\" version=\"1.0\">\n" +
" <document-processing/>\n" +
" <document-api/>\n" +
" <search/>\n" +
" <nodes jvmargs=\"-Xms512m -Xmx512m\">\n" +
" <node hostalias=\"vespa-1\"/>\n" +
" </nodes>\n" +
" </container>\n" +
"\n" +
" <content id=\"storage\" version=\"1.0\">\n" +
" <search>\n" +
" <visibility-delay>1.0</visibility-delay>\n" +
" </search>\n" +
" <redundancy>2</redundancy>\n" +
" <documents>\n" +
" <document type=\"type1\" mode=\"index\"/>\n" +
" <document-processing cluster=\"container\"/>\n" +
" </documents>\n" +
" <nodes>\n" +
" <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
" </nodes>\n" +
" </content>\n" +
"\n" +
"</services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(1));
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(1, content.getRootGroup().getNodes().size());
ContainerCluster controller = content.getClusterControllers();
assertEquals(1, controller.getContainers().size());
} | "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + | public void testModelWithReferencedIndexingCluster() {
String services =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<services version=\"1.0\">\n" +
"\n" +
" <admin version=\"2.0\">\n" +
" <adminserver hostalias=\"vespa-1\"/>\n" +
" <configservers>\n" +
" <configserver hostalias=\"vespa-1\"/>\n" +
" </configservers>\n" +
" </admin>\n" +
"\n" +
" <container id=\"container\" version=\"1.0\">\n" +
" <document-processing/>\n" +
" <document-api/>\n" +
" <search/>\n" +
" <nodes jvmargs=\"-Xms512m -Xmx512m\">\n" +
" <node hostalias=\"vespa-1\"/>\n" +
" </nodes>\n" +
" </container>\n" +
"\n" +
" <content id=\"storage\" version=\"1.0\">\n" +
" <search>\n" +
" <visibility-delay>1.0</visibility-delay>\n" +
" </search>\n" +
" <redundancy>2</redundancy>\n" +
" <documents>\n" +
" <document type=\"type1\" mode=\"index\"/>\n" +
" <document-processing cluster=\"container\"/>\n" +
" </documents>\n" +
" <nodes>\n" +
" <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
" </nodes>\n" +
" </content>\n" +
"\n" +
"</services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(1));
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(1, content.getRootGroup().getNodes().size());
ContainerCluster controller = content.getClusterControllers();
assertEquals(1, controller.getContainers().size());
} | class ModelProvisioningTest {
@Test
public void testNodeCountForJdisc() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>\n" +
"\n" +
"<admin version='3.0'><nodes count='1' /></admin>\n" +
"<jdisc id='mydisc' version='1.0'>" +
" <handler id='myHandler'>" +
" <component id='injected' />" +
" </handler>" +
" <nodes count=\"3\"/>" +
"</jdisc>" +
"<jdisc id='mydisc2' version='1.0'>" +
" <document-processing/>" +
" <handler id='myHandler'>" +
" <component id='injected' />" +
" </handler>" +
" <nodes count='2' allocated-memory='45%' jvmargs='-verbosegc' preload='lib/blablamalloc.so'/>" +
"</jdisc>" +
"</services>";
String hosts ="<hosts>"
+ " <host name='myhost0'>"
+ " <alias>node0</alias>"
+ " </host>"
+ " <host name='myhost1'>"
+ " <alias>node1</alias>"
+ " </host>"
+ " <host name='myhost2'>"
+ " <alias>node2</alias>"
+ " </host>"
+ " <host name='myhost3'>"
+ " <alias>node3</alias>"
+ " </host>"
+ " <host name='myhost4'>"
+ " <alias>node4</alias>"
+ " </host>"
+ " <host name='myhost5'>"
+ " <alias>node5</alias>"
+ " </host>"
+ "</hosts>";
VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services);
VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true)));
assertThat(model.getContainerClusters().get("mydisc").getContainers().size(), is(3));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getConfigId(), is("mydisc/container.0"));
assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(0).isInitialized());
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getConfigId(), is("mydisc/container.1"));
assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(1).isInitialized());
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getConfigId(), is("mydisc/container.2"));
assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(2).isInitialized());
assertThat(model.getContainerClusters().get("mydisc2").getContainers().size(), is(2));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getConfigId(), is("mydisc2/container.0"));
assertTrue(model.getContainerClusters().get("mydisc2").getContainers().get(0).isInitialized());
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getConfigId(), is("mydisc2/container.1"));
assertTrue(model.getContainerClusters().get("mydisc2").getContainers().get(1).isInitialized());
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getJvmArgs(), is(""));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getJvmArgs(), is(""));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getJvmArgs(), is(""));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(model.getContainerClusters().get("mydisc").getMemoryPercentage(), is(Optional.empty()));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getJvmArgs(), is("-verbosegc"));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getJvmArgs(), is("-verbosegc"));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getPreLoad(), is("lib/blablamalloc.so"));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getPreLoad(), is("lib/blablamalloc.so"));
assertThat(model.getContainerClusters().get("mydisc2").getMemoryPercentage(), is(Optional.of(45)));
HostSystem hostSystem = model.getHostSystem();
assertNotNull(hostSystem.getHostByHostname("myhost0"));
assertNotNull(hostSystem.getHostByHostname("myhost1"));
assertNotNull(hostSystem.getHostByHostname("myhost2"));
assertNotNull(hostSystem.getHostByHostname("myhost3"));
assertNull(hostSystem.getHostByHostname("Nope"));
}
@Test
public void testNodeCountForContentGroup() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"\n" +
" <admin version='3.0'>" +
" <nodes count='3'/>" +
" </admin>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
int numberOfHosts = 2;
tester.addHosts(numberOfHosts);
int numberOfContentNodes = 2;
VespaModel model = tester.createModel(xmlWithNodes, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
final Map<String, ContentCluster> contentClusters = model.getContentClusters();
ContentCluster cluster = contentClusters.get("bar");
assertThat(cluster.getRootGroup().getNodes().size(), is(numberOfContentNodes));
int i = 0;
for (StorageNode node : cluster.getRootGroup().getNodes())
assertEquals(i++, node.getDistributionKey());
}
@Test
public void testSeparateClusters() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes count='1'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(3);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 1, model.getContainerClusters().get("container1").getContainers().size());
assertEquals("Heap size for container", 60, physicalMemoryPercentage(model.getContainerClusters().get("container1")));
}
@Test
public void testClusterMembership() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes count='1'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(1, model.getHostSystem().getHosts().size());
HostResource host = model.getHostSystem().getHosts().iterator().next();
assertEquals(1, host.clusterMemberships().size());
ClusterMembership membership = host.clusterMemberships().iterator().next();
assertEquals("container", membership.cluster().type().name());
assertEquals("container1", membership.cluster().id().value());
}
@Test
public void testCombinedCluster() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes of='content1'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
assertEquals("Heap size is lowered with combined clusters",
17, physicalMemoryPercentage(model.getContainerClusters().get("container1")));
}
@Test
public void testCombinedClusterWithJvmArgs() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <document-processing/>" +
" <nodes of='content1' jvmargs='testarg'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
for (Container container : model.getContainerClusters().get("container1").getContainers())
assertTrue(container.getJvmArgs().contains("testarg"));
}
@Test
public void testMultipleCombinedClusters() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes of='content1'/>" +
" </container>" +
" <container version='1.0' id='container2'>" +
" <nodes of='content2'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
" <content version='1.0' id='content2'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='3'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(5);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
assertEquals("Nodes in content2", 3, model.getContentClusters().get("content2").getRootGroup().getNodes().size());
assertEquals("Nodes in container2", 3, model.getContainerClusters().get("container2").getContainers().size());
}
@Test
public void testNonExistingCombinedClusterReference() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes of='container2'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
try {
tester.createModel(xmlWithNodes, true);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("container cluster 'container1' references service 'container2' but this service is not defined", e.getMessage());
}
}
@Test
public void testInvalidCombinedClusterReference() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" +
" </container>" +
" <container version='1.0' id='container2'>" +
" <nodes count='2'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
try {
tester.createModel(xmlWithNodes, true);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("container cluster 'container1' references service 'container2', but that is not a content service", e.getMessage());
}
}
@Test
public void testUsingNodesAndGroupCountAttributes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='9'/>" +
" </content>" +
" <content version='1.0' id='baz'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='27'/>" +
" </content>" +
"</services>";
int numberOfHosts = 64;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals(1, model.getContainerClusters().size());
Set<com.yahoo.vespa.model.Host> containerHosts = model.getContainerClusters().get("foo").getContainers().stream().map(Container::getHost).collect(Collectors.toSet());
assertEquals(10, containerHosts.size());
Admin admin = model.getAdmin();
Set<com.yahoo.vespa.model.Host> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
assertTrue("Slobroks are assigned from container nodes", containerHosts.containsAll(slobrokHosts));
assertTrue("Logserver is assigned from container nodes", containerHosts.contains(admin.getLogserver().getHost()));
assertEquals("No in-cluster config servers in a hosted environment", 0, admin.getConfigservers().size());
assertEquals("No admin cluster controller when multitenant", null, admin.getClusterControllers());
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default28", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default31", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default54", clusterControllers.getContainers().get(2).getHostName());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(9, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertEquals("default54", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getDistributionKey(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getConfigId(), is("bar/storage/2"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/3"));
assertEquals("default51", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(4));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/4"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getDistributionKey(), is(5));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getConfigId(), is("bar/storage/5"));
assertEquals("default48", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(8).getIndex(), is("8"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getDistributionKey(), is(24));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getConfigId(), is("bar/storage/24"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getDistributionKey(), is(25));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getConfigId(), is("bar/storage/25"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getDistributionKey(), is(26));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getConfigId(), is("bar/storage/26"));
cluster = model.getContentClusters().get("baz");
clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("baz-controllers", clusterControllers.getName());
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default02", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default27", clusterControllers.getContainers().get(2).getHostName());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(27, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("baz/storage/0"));
assertEquals("default27", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("baz/storage/1"));
assertEquals("default26", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertEquals("default25", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(26).getIndex(), is("26"));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getDistributionKey(), is(26));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getConfigId(), is("baz/storage/26"));
}
/** 8 content nodes in 8 groups of size 1: verifies group indexes, distribution keys and controller placement. */
@Test
public void testGroupsOfSize1() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>1</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='8' groups='8'/>" +
            " </content>" +
            "</services>";
    int hostCount = 18;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContentCluster contentCluster = vespaModel.getContentClusters().get("bar");
    ContainerCluster controllers = contentCluster.getClusterControllers();
    assertEquals(3, controllers.getContainers().size());
    assertEquals("bar-controllers", controllers.getName());
    assertEquals("default01", controllers.getContainers().get(0).getHostName());
    assertEquals("default02", controllers.getContainers().get(1).getHostName());
    assertEquals("default08", controllers.getContainers().get(2).getHostName());
    // All nodes live in subgroups, one node per group.
    assertEquals(0, contentCluster.getRootGroup().getNodes().size());
    assertEquals(8, contentCluster.getRootGroup().getSubgroups().size());
    assertEquals(8, contentCluster.distributionBits());
    // First group
    assertEquals("0", contentCluster.getRootGroup().getSubgroups().get(0).getIndex());
    assertEquals(1, contentCluster.getRootGroup().getSubgroups().get(0).getNodes().size());
    assertEquals(0, contentCluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey());
    assertEquals("bar/storage/0", contentCluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId());
    assertEquals("default08", contentCluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
    // Second group
    assertEquals("1", contentCluster.getRootGroup().getSubgroups().get(1).getIndex());
    assertEquals(1, contentCluster.getRootGroup().getSubgroups().get(1).getNodes().size());
    assertEquals(1, contentCluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey());
    assertEquals("bar/storage/1", contentCluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId());
    assertEquals("default07", contentCluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
    // Last group
    assertEquals("7", contentCluster.getRootGroup().getSubgroups().get(7).getIndex());
    assertEquals(1, contentCluster.getRootGroup().getSubgroups().get(7).getNodes().size());
    assertEquals(7, contentCluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getDistributionKey());
    assertEquals("bar/storage/7", contentCluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getConfigId());
    assertEquals("default01", contentCluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getHostName());
}
/** Explicitly requesting 6 non-dedicated controllers rounds down to the closest odd number (5). */
@Test
public void testExplicitNonDedicatedClusterControllers() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <controllers><nodes dedicated='false' count='6'/></controllers>" +
            " <nodes count='9' groups='3'/>" +
            " </content>" +
            "</services>";
    int hostCount = 19;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContentCluster contentCluster = vespaModel.getContentClusters().get("bar");
    ContainerCluster controllers = contentCluster.getClusterControllers();
    assertEquals(8, contentCluster.distributionBits());
    assertEquals("We get the closest odd number", 5, controllers.getContainers().size());
    assertEquals("bar-controllers", controllers.getName());
    assertEquals("default01", controllers.getContainers().get(0).getHostName());
    assertEquals("default02", controllers.getContainers().get(1).getHostName());
    assertEquals("default04", controllers.getContainers().get(2).getHostName());
    assertEquals("default05", controllers.getContainers().get(3).getHostName());
    assertEquals("default07", controllers.getContainers().get(4).getHostName());
    assertEquals("default09", contentCluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
    assertEquals("default08", contentCluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getHostName());
    assertEquals("default06", contentCluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
    assertEquals("default03", contentCluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
}
/** 4 groups of size 2: the requested controller count is reduced to the closest odd number (3). */
@Test
public void testClusterControllersWithGroupSize2() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='8' groups='4'/>" +
            " </content>" +
            "</services>";
    int hostCount = 18;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContentCluster contentCluster = vespaModel.getContentClusters().get("bar");
    ContainerCluster controllers = contentCluster.getClusterControllers();
    assertEquals("We get the closest odd number", 3, controllers.getContainers().size());
    assertEquals("bar-controllers", controllers.getName());
    assertEquals("default01", controllers.getContainers().get(0).getHostName());
    assertEquals("default03", controllers.getContainers().get(1).getHostName());
    assertEquals("default08", controllers.getContainers().get(2).getHostName());
}
/** With only 2 content nodes, the cluster controller cluster may borrow container nodes; here it ends up with 1. */
@Test
public void testClusterControllersCanSupplementWithAllContainerClusters() throws ParseException {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo1'>" +
            " <nodes count='2'/>" +
            " </container>" +
            " <container version='1.0' id='foo2'>" +
            " <nodes count='1'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <controllers><nodes dedicated='false' count='5'/></controllers>" +
            " <nodes count='2'/>" +
            " </content>" +
            "</services>";
    int hostCount = 5;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContentCluster contentCluster = vespaModel.getContentClusters().get("bar");
    ContainerCluster controllers = contentCluster.getClusterControllers();
    assertEquals(1, controllers.getContainers().size());
}
/** Cluster controllers must skip retired nodes when being assigned to hosts. */
@Test
public void testClusterControllersAreNotPlacedOnRetiredNodes() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='9' groups='3'/>" +
            " </content>" +
            "</services>";
    int hostCount = 19;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    // Retire one node in each group.
    VespaModel vespaModel = modelTester.createModel(servicesXml, true, "default09", "default06", "default03");
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContentCluster contentCluster = vespaModel.getContentClusters().get("bar");
    ContainerCluster controllers = contentCluster.getClusterControllers();
    assertEquals(3, controllers.getContainers().size());
    assertEquals("bar-controllers", controllers.getName());
    assertEquals("Skipping retired default09", "default01", controllers.getContainers().get(0).getHostName());
    assertEquals("Skipping retired default03", "default04", controllers.getContainers().get(1).getHostName());
    assertEquals("Skipping retired default06", "default08", controllers.getContainers().get(2).getHostName());
}
/** The slobrok cluster includes retired nodes in addition to the normally chosen ones. */
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            "</services>";
    int hostCount = 10;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true, "default09");
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals("Includes retired node", 1+3, vespaModel.getAdmin().getSlobroks().size());
    assertEquals("default01", vespaModel.getAdmin().getSlobroks().get(0).getHostName());
    assertEquals("default02", vespaModel.getAdmin().getSlobroks().get(1).getHostName());
    assertEquals("default10", vespaModel.getAdmin().getSlobroks().get(2).getHostName());
    assertEquals("Included in addition because it is retired", "default09", vespaModel.getAdmin().getSlobroks().get(3).getHostName());
}
/** Like the test above, but with the retired nodes being the last ones by index. */
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() throws ParseException {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            "</services>";
    int hostCount = 10;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true, "default09", "default08");
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals("Includes retired node", 3+2, vespaModel.getAdmin().getSlobroks().size());
    assertEquals("default01", vespaModel.getAdmin().getSlobroks().get(0).getHostName());
    assertEquals("default02", vespaModel.getAdmin().getSlobroks().get(1).getHostName());
    assertEquals("default10", vespaModel.getAdmin().getSlobroks().get(2).getHostName());
    assertEquals("Included in addition because it is retired", "default08", vespaModel.getAdmin().getSlobroks().get(3).getHostName());
    assertEquals("Included in addition because it is retired", "default09", vespaModel.getAdmin().getSlobroks().get(4).getHostName());
}
/** Slobroks are distributed over all container clusters, and retired hosts are kept in addition. */
@Test
public void testSlobroksAreSpreadOverAllContainerClusters() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            " <container version='1.0' id='bar'>" +
            " <nodes count='3'/>" +
            " </container>" +
            "</services>";
    int hostCount = 13;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true, "default12", "default03", "default02");
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals("Includes retired node", 3+3, vespaModel.getAdmin().getSlobroks().size());
    assertEquals("default04", vespaModel.getAdmin().getSlobroks().get(0).getHostName());
    assertEquals("default13", vespaModel.getAdmin().getSlobroks().get(1).getHostName());
    assertEquals("Included in addition because it is retired", "default12", vespaModel.getAdmin().getSlobroks().get(2).getHostName());
    assertEquals("default01", vespaModel.getAdmin().getSlobroks().get(3).getHostName());
    assertEquals("Included in addition because it is retired", "default02", vespaModel.getAdmin().getSlobroks().get(4).getHostName());
    assertEquals("Included in addition because it is retired", "default03", vespaModel.getAdmin().getSlobroks().get(5).getHostName());
}
/** In the hosted routing application, slobroks must avoid the node-admin container cluster. */
@Test
public void testSlobroksAreSpreadOverAllContainerClustersExceptNodeAdmin() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='routing'>" +
            " <nodes count='10'/>" +
            " </container>" +
            " <container version='1.0' id='node-admin'>" +
            " <nodes count='3'/>" +
            " </container>" +
            "</services>";
    int hostCount = 13;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    modelTester.setApplicationId("hosted-vespa", "routing", "default");
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    Set<String> routingHosts = getClusterHostnames(vespaModel, "routing");
    assertEquals(10, routingHosts.size());
    Set<String> nodeAdminHosts = getClusterHostnames(vespaModel, "node-admin");
    assertEquals(3, nodeAdminHosts.size());
    Set<String> slobrokHosts = vespaModel.getAdmin().getSlobroks().stream()
            .map(AbstractService::getHostName)
            .collect(Collectors.toSet());
    assertEquals(3, slobrokHosts.size());
    // Every slobrok lives on a routing host and none on a node-admin host.
    assertThat(slobrokHosts, everyItem(isIn(routingHosts)));
    assertThat(slobrokHosts, everyItem(not(isIn(nodeAdminHosts))));
}
/** Returns the hostnames of all hosts running at least one service belonging to the given cluster. */
private Set<String> getClusterHostnames(VespaModel model, String clusterId) {
    Optional<String> wantedCluster = Optional.of(clusterId);
    return model.getHosts().stream()
            .filter(host -> host.getServices().stream()
                    .anyMatch(service -> wantedCluster.equals(service.getProperty("clustername"))))
            .map(HostInfo::getHostname)
            .collect(Collectors.toSet());
}
/** With only 2 content nodes and no container clusters, a single cluster controller is produced. */
@Test
public void test2ContentNodesProduces1ClusterController() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'/>" +
            " </content>" +
            "</services>";
    int hostCount = 2;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContentCluster contentCluster = vespaModel.getContentClusters().get("bar");
    ContainerCluster controllers = contentCluster.getClusterControllers();
    assertEquals(1, controllers.getContainers().size());
}
/** 2 content nodes plus a container cluster: controller cluster ends up with a single container. */
@Test
public void test2ContentNodesWithContainerClusterProducesMixedClusterControllerCluster() throws ParseException {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='3'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'/>" +
            " </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(5);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    ContentCluster contentCluster = vespaModel.getContentClusters().get("bar");
    ContainerCluster controllers = contentCluster.getClusterControllers();
    assertEquals(1, controllers.getContainers().size());
}
/**
 * Two content clusters each supplement their mixed cluster controller cluster with a container
 * node, and must not pick the container node already used by the other content cluster.
 *
 * NOTE(review): this test is {@code @Ignore}d. As originally written it asserted that
 * clusterControllers1 has 1 container but then accessed indices 1 and 2 of that list, which
 * could never pass; the expected size here is 3, mirroring the assertion for clusterControllers2.
 */
@Ignore
@Test
public void test2ContentNodesOn2ClustersWithContainerClusterProducesMixedClusterControllerCluster() throws ParseException {
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <container version='1.0' id='container'>" +
            " <nodes count='3' flavor='container-node'/>" +
            " </container>" +
            " <content version='1.0' id='content1'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2' flavor='content1-node'/>" +
            " </content>" +
            " <content version='1.0' id='content2'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2' flavor='content2-node'/>" +
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts("container-node", 3);
    tester.addHosts("content1-node", 2);
    tester.addHosts("content2-node", 2);
    VespaModel model = tester.createModel(services, true);
    ContentCluster cluster1 = model.getContentClusters().get("content1");
    ContainerCluster clusterControllers1 = cluster1.getClusterControllers();
    // Was asserted as 1, which contradicts the index-1 and index-2 accesses below.
    assertEquals(3, clusterControllers1.getContainers().size());
    assertEquals("content1-node0", clusterControllers1.getContainers().get(0).getHostName());
    assertEquals("content1-node1", clusterControllers1.getContainers().get(1).getHostName());
    assertEquals("container-node0", clusterControllers1.getContainers().get(2).getHostName());
    ContentCluster cluster2 = model.getContentClusters().get("content2");
    ContainerCluster clusterControllers2 = cluster2.getClusterControllers();
    assertEquals(3, clusterControllers2.getContainers().size());
    assertEquals("content2-node0", clusterControllers2.getContainers().get(0).getHostName());
    assertEquals("content2-node1", clusterControllers2.getContainers().get(1).getHostName());
    assertEquals("We do not pick the container used to supplement another cluster",
                 "container-node1", clusterControllers2.getContainers().get(2).getHostName());
}
/** Explicitly requesting 4 dedicated controllers yields exactly 4 dedicated controller nodes. */
@Test
public void testExplicitDedicatedClusterControllers() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <controllers><nodes dedicated='true' count='4'/></controllers>" +
            " <nodes count='9' groups='3'/>" +
            " </content>" +
            "</services>";
    int hostCount = 23;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContentCluster contentCluster = vespaModel.getContentClusters().get("bar");
    ContainerCluster controllers = contentCluster.getClusterControllers();
    assertEquals(4, controllers.getContainers().size());
    assertEquals("bar-controllers", controllers.getName());
    assertEquals("default04", controllers.getContainers().get(0).getHostName());
    assertEquals("default03", controllers.getContainers().get(1).getHostName());
    assertEquals("default02", controllers.getContainers().get(2).getHostName());
    assertEquals("default01", controllers.getContainers().get(3).getHostName());
}
/**
 * Requesting 24 nodes in 3 groups with only 6 hosts available (non-failing model build):
 * redundancy and group layout are scaled down to what the hosts can support.
 */
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <admin version='3.0'>" +
            " <nodes count='3'/>" +
            " </admin>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy reply-after='3'>4</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='24' groups='3'/>" +
            " <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
            " </content>" +
            "</services>";
    int hostCount = 6;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, false);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContentCluster contentCluster = vespaModel.getContentClusters().get("bar");
    // Redundancy is scaled down: 2 copies in each of the 3 groups.
    assertEquals(2*3, contentCluster.redundancy().effectiveInitialRedundancy());
    assertEquals(2*3, contentCluster.redundancy().effectiveFinalRedundancy());
    assertEquals(2*3, contentCluster.redundancy().effectiveReadyCopies());
    assertEquals("2|2|*", contentCluster.getRootGroup().getPartitions().get());
    assertEquals(0, contentCluster.getRootGroup().getNodes().size());
    assertEquals(3, contentCluster.getRootGroup().getSubgroups().size());
    // Group 0
    assertEquals("0", contentCluster.getRootGroup().getSubgroups().get(0).getIndex());
    assertEquals(2, contentCluster.getRootGroup().getSubgroups().get(0).getNodes().size());
    assertEquals(0, contentCluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey());
    assertEquals("bar/storage/0", contentCluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId());
    assertEquals(1, contentCluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey());
    assertEquals("bar/storage/1", contentCluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId());
    // Group 1
    assertEquals("1", contentCluster.getRootGroup().getSubgroups().get(1).getIndex());
    assertEquals(2, contentCluster.getRootGroup().getSubgroups().get(1).getNodes().size());
    assertEquals(2, contentCluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey());
    assertEquals("bar/storage/2", contentCluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId());
    assertEquals(3, contentCluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey());
    assertEquals("bar/storage/3", contentCluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId());
    // Group 2
    assertEquals("2", contentCluster.getRootGroup().getSubgroups().get(2).getIndex());
    assertEquals(2, contentCluster.getRootGroup().getSubgroups().get(2).getNodes().size());
    assertEquals(4, contentCluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getDistributionKey());
    assertEquals("bar/storage/4", contentCluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getConfigId());
    assertEquals(5, contentCluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getDistributionKey());
    assertEquals("bar/storage/5", contentCluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getConfigId());
}
/**
 * Requesting 24 flat nodes with only 4 hosts available (non-failing model build):
 * redundancy, ready copies and dispatch groups are scaled down to the actual node count.
 */
@Test
public void testUsingNodesCountAttributesAndGettingTooFewNodes() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <admin version='3.0'>" +
            " <nodes count='3'/>" +
            " </admin>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy reply-after='8'>12</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='24'/>" +
            " <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
            " <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
            " </content>" +
            "</services>";
    int hostCount = 4;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, false);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContentCluster contentCluster = vespaModel.getContentClusters().get("bar");
    assertEquals(4, contentCluster.redundancy().effectiveInitialRedundancy());
    assertEquals(4, contentCluster.redundancy().effectiveFinalRedundancy());
    assertEquals(4, contentCluster.redundancy().effectiveReadyCopies());
    assertEquals(4, contentCluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
    assertFalse(contentCluster.getRootGroup().getPartitions().isPresent());
    assertEquals(4, contentCluster.getRootGroup().getNodes().size());
    assertEquals(0, contentCluster.getRootGroup().getSubgroups().size());
    assertEquals(4, contentCluster.getRootGroup().getNodes().size());
    assertEquals(0, contentCluster.getRootGroup().getNodes().get(0).getDistributionKey());
    assertEquals("bar/storage/0", contentCluster.getRootGroup().getNodes().get(0).getConfigId());
    assertEquals(1, contentCluster.getRootGroup().getNodes().get(1).getDistributionKey());
    assertEquals("bar/storage/1", contentCluster.getRootGroup().getNodes().get(1).getConfigId());
    assertEquals(2, contentCluster.getRootGroup().getNodes().get(2).getDistributionKey());
    assertEquals("bar/storage/2", contentCluster.getRootGroup().getNodes().get(2).getConfigId());
    assertEquals(3, contentCluster.getRootGroup().getNodes().get(3).getDistributionKey());
    assertEquals("bar/storage/3", contentCluster.getRootGroup().getNodes().get(3).getConfigId());
}
/** Requesting 24 nodes in 3 groups with a single host available: everything collapses to one node. */
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() throws ParseException {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='3.0'>" +
            " <nodes count='3'/>" +
            " </admin>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy reply-after='3'>4</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='24' groups='3'/>" +
            " <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
            " </content>" +
            "</services>";
    int hostCount = 1;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, false);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContentCluster contentCluster = vespaModel.getContentClusters().get("bar");
    ContainerCluster controllers = contentCluster.getClusterControllers();
    assertEquals(1, controllers.getContainers().size());
    assertEquals("bar-controllers", controllers.getName());
    assertEquals("default01", controllers.getContainers().get(0).getHostName());
    assertEquals(1, contentCluster.redundancy().effectiveInitialRedundancy());
    assertEquals(1, contentCluster.redundancy().effectiveFinalRedundancy());
    assertEquals(1, contentCluster.redundancy().effectiveReadyCopies());
    assertFalse(contentCluster.getRootGroup().getPartitions().isPresent());
    assertEquals(1, contentCluster.getRootGroup().getNodes().size());
    assertEquals(0, contentCluster.getRootGroup().getSubgroups().size());
    assertEquals(1, contentCluster.getRootGroup().getNodes().size());
    assertEquals(0, contentCluster.getRootGroup().getNodes().get(0).getDistributionKey());
    assertEquals("bar/storage/0", contentCluster.getRootGroup().getNodes().get(0).getConfigId());
}
/** With required='true', asking for more nodes than hosts available must fail model building. */
@Test(expected = IllegalArgumentException.class)
public void testRequiringMoreNodesThanAreAvailable() throws ParseException {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>1</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='3' required='true'/>" +
            " </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(2); // One fewer than required
    modelTester.createModel(servicesXml, false);
}
/** Requesting 24 flat nodes with a single host available: everything collapses to one node. */
@Test
public void testUsingNodesCountAttributesAndGettingJustOneNode() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='3.0'>" +
            " <nodes count='3'/>" +
            " </admin>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy reply-after='8'>12</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='24'/>" +
            " <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
            " <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
            " </content>" +
            "</services>";
    int hostCount = 1;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, false);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContentCluster contentCluster = vespaModel.getContentClusters().get("bar");
    assertEquals(1, contentCluster.redundancy().effectiveInitialRedundancy());
    assertEquals(1, contentCluster.redundancy().effectiveFinalRedundancy());
    assertEquals(1, contentCluster.redundancy().effectiveReadyCopies());
    assertEquals(1, contentCluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
    assertFalse(contentCluster.getRootGroup().getPartitions().isPresent());
    assertEquals(1, contentCluster.getRootGroup().getNodes().size());
    assertEquals(0, contentCluster.getRootGroup().getSubgroups().size());
    assertEquals(1, contentCluster.getRootGroup().getNodes().size());
    assertEquals(0, contentCluster.getRootGroup().getNodes().get(0).getDistributionKey());
    assertEquals("bar/storage/0", contentCluster.getRootGroup().getNodes().get(0).getConfigId());
}
/** Every service type can request a specific flavor; all requested flavors must be honored. */
@Test
public void testRequestingSpecificFlavors() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'>" +
            " <logservers><nodes count='1' dedicated='true' flavor='logserver-flavor'/></logservers>" +
            " <slobroks><nodes count='2' dedicated='true' flavor='slobrok-flavor'/></slobroks>" +
            " </admin>" +
            " <container version='1.0' id='container'>" +
            " <nodes count='4' flavor='container-flavor'/>" +
            " </container>" +
            " <content version='1.0' id='foo'>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <controllers><nodes count='2' dedicated='true' flavor='controller-foo-flavor'/></controllers>" +
            " <nodes count='5' flavor='content-foo-flavor'/>" +
            " </content>" +
            " <content version='1.0' id='bar'>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <controllers><nodes count='3' dedicated='true' flavor='controller-bar-flavor'/></controllers>" +
            " <nodes count='6' flavor='content-bar-flavor'/>" +
            " </content>" +
            "</services>";
    int totalHostCount = 23;
    VespaModelTester modelTester = new VespaModelTester();
    // Provision exactly the number of hosts requested for each flavor.
    modelTester.addHosts("logserver-flavor", 1);
    modelTester.addHosts("slobrok-flavor", 2);
    modelTester.addHosts("container-flavor", 4);
    modelTester.addHosts("controller-foo-flavor", 2);
    modelTester.addHosts("content-foo-flavor", 5);
    modelTester.addHosts("controller-bar-flavor", 3);
    modelTester.addHosts("content-bar-flavor", 6);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true, 0);
    assertEquals(totalHostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
}
/** A services file containing only a jdisc cluster still gets a log server and slobroks. */
@Test
public void testJDiscOnly() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<jdisc version='1.0'>" +
            " <search/>" +
            " <nodes count='3'/>" +
            "</jdisc>";
    int hostCount = 3;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(3, vespaModel.getContainerClusters().get("jdisc").getContainers().size());
    assertNotNull(vespaModel.getAdmin().getLogserver());
    assertEquals(3, vespaModel.getAdmin().getSlobroks().size());
}
/** Host aliases in services.xml work together with the host provisioner. */
@Test
public void testUsingHostaliasWithProvisioner() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "<admin version='2.0'>" +
            " <adminserver hostalias='node1'/>\n"+
            "</admin>\n" +
            "<jdisc id='mydisc' version='1.0'>" +
            " <handler id='myHandler'>" +
            " <component id='injected' />" +
            " </handler>" +
            " <nodes>" +
            " <node hostalias='node1'/>" +
            " </nodes>" +
            "</jdisc>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
}
/** Standalone-container syntax (explicit http server on the default port) also works hosted. */
@Test
public void testThatStandaloneSyntaxWorksOnHostedVespa() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<jdisc id='foo' version='1.0'>" +
            " <http>" +
            " <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" +
            " </http>" +
            "</jdisc>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getHosts().size());
    assertEquals(1, vespaModel.getContainerClusters().size());
}
/** Omitting the nodes tag defaults every cluster to a single node (hosted). */
@Test
public void testNoNodeTagMeans1Node() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <jdisc id='foo' version='1.0'>" +
            " <search/>" +
            " <document-api/>" +
            " </jdisc>" +
            " <content version='1.0' id='bar'>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
    assertEquals(1, vespaModel.getContainerClusters().get("foo").getContainers().size());
    assertEquals(1, vespaModel.getContentClusters().get("bar").getRootGroup().countNodes());
}
/** Omitting the nodes tag defaults to a single node, with no content cluster present. */
@Test
public void testNoNodeTagMeans1NodeNoContent() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <jdisc id='foo' version='1.0'>" +
            " <search/>" +
            " <document-api/>" +
            " </jdisc>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
    assertEquals(1, vespaModel.getContainerClusters().get("foo").getContainers().size());
}
/** Omitting the nodes tag also defaults to a single node when not hosted. */
@Test
public void testNoNodeTagMeans1NodeNonHosted() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <jdisc id='foo' version='1.0'>" +
            " <search/>" +
            " <document-api/>" +
            " </jdisc>" +
            " <content version='1.0' id='bar'>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.setHosted(false);
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
    assertEquals(1, vespaModel.getContainerClusters().get("foo").getContainers().size());
    assertEquals(1, vespaModel.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size());
}
// Non-hosted model with explicit single-node <nodes> tags (hostalias-based)
// for both the container and the content cluster.
@Test
public void testSingleNodeNonHosted() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <jdisc id='foo' version='1.0'>" +
" <search/>" +
" <document-api/>" +
" <nodes><node hostalias='foo'/></nodes>"+
" </jdisc>" +
" <content version='1.0' id='bar'>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(false);
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes());
}
/**
 * Recreate the combination used in some factory tests: a multitenant but
 * non-hosted model where admin, container and a store-only content cluster
 * all share a single host. Verifies the content cluster gets both nodes and
 * a one-container cluster controller.
 */
@Test
public void testMultitenantButNotHosted() {
String services =
"<?xml version='1.0' encoding='UTF-8' ?>" +
"<services version='1.0'>" +
" <admin version='2.0'>" +
" <adminserver hostalias='node1'/>" +
" </admin>" +
" <jdisc id='default' version='1.0'>" +
" <search/>" +
" <nodes>" +
" <node hostalias='node1'/>" +
" </nodes>" +
" </jdisc>" +
" <content id='storage' version='1.0'>" +
" <redundancy>2</redundancy>" +
" <group>" +
" <node distribution-key='0' hostalias='node1'/>" +
" <node distribution-key='1' hostalias='node1'/>" +
" </group>" +
" <tuning>" +
" <cluster-controller>" +
" <transition-time>0</transition-time>" +
" </cluster-controller>" +
" </tuning>" +
" <documents>" +
" <document mode='store-only' type='type1'/>" +
" </documents>" +
" <engine>" +
" <proton/>" +
" </engine>" +
" </content>" +
" </services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(1));
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(2, content.getRootGroup().getNodes().size());
ContainerCluster controller = content.getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
/**
 * Same multitenant-but-not-hosted combination as above, extended with a second
 * ('search') content cluster sharing the same single host as the 'storage'
 * cluster. Verifies the model still builds and both clusters are present.
 */
// NOTE: the duplicated @Test annotation was removed — @Test is not a repeatable
// annotation, so repeating it is a compile error.
@Test
public void testMultitenantButNotHostedSharedContentNode() {
String services =
"<?xml version='1.0' encoding='UTF-8' ?>" +
"<services version='1.0'>" +
" <admin version='2.0'>" +
" <adminserver hostalias='node1'/>" +
" </admin>" +
" <jdisc id='default' version='1.0'>" +
" <search/>" +
" <nodes>" +
" <node hostalias='node1'/>" +
" </nodes>" +
" </jdisc>" +
" <content id='storage' version='1.0'>" +
" <redundancy>2</redundancy>" +
" <group>" +
" <node distribution-key='0' hostalias='node1'/>" +
" <node distribution-key='1' hostalias='node1'/>" +
" </group>" +
" <tuning>" +
" <cluster-controller>" +
" <transition-time>0</transition-time>" +
" </cluster-controller>" +
" </tuning>" +
" <documents>" +
" <document mode='store-only' type='type1'/>" +
" </documents>" +
" <engine>" +
" <proton/>" +
" </engine>" +
" </content>" +
" <content id='search' version='1.0'>" +
" <redundancy>2</redundancy>" +
" <group>" +
" <node distribution-key='0' hostalias='node1'/>" +
" </group>" +
" <documents>" +
" <document type='type1'/>" +
" </documents>" +
" </content>" +
" </services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(1));
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(2, content.getRootGroup().getNodes().size());
ContainerCluster controller = content.getClusterControllers();
assertEquals(1, controller.getContainers().size());
// Also assert on the second content cluster this test exists to exercise:
// the 'search' cluster declares a single node on the shared host.
ContentCluster search = model.getContentClusters().get("search");
assertEquals(1, search.getRootGroup().getNodes().size());
}
/**
 * Builds a multitenant (but non-provisioned) model from the given services.xml,
 * using a mock application package that bundles the 'type1' search definition.
 */
private VespaModel createNonProvisionedMultitenantModel(String services) {
VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(null, services, ApplicationPackageUtils.generateSearchDefinition("type1"));
ApplicationPackage appPkg = modelCreatorWithMockPkg.appPkg;
DeployState deployState = new DeployState.Builder().applicationPackage(appPkg).
properties((new DeployProperties.Builder()).multitenant(true).build()).
build(true);
return modelCreatorWithMockPkg.create(false, deployState);
}
// TLD config ids must be deterministic: a TLD running on the same host as a
// container must get the same index as that container, both on a fresh
// allocation and when the model is created with extra/retired hosts
// (second scope — presumably simulating host replacement; see createModel args).
@Test
public void testThatTldConfigIdsAreDeterministic() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <jdisc version='1.0' id='jdisc0'>" +
" <search/>" +
" <nodes count='2'/>" +
" </jdisc>" +
" <jdisc version='1.0' id='jdisc1'>" +
" <search/>" +
" <nodes count='2'/>" +
" </jdisc>" +
" <content version='1.0' id='content0'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
int numberOfHosts = 8;
{
// Fresh allocation: container indexes start at 0.
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
Map<String, ContentCluster> contentClusters = model.getContentClusters();
assertEquals(2, contentClusters.size());
checkThatTldAndContainerRunningOnSameHostHaveSameId(
model.getContainerClusters().values(),
model.getContentClusters().values(),
0);
}
{
// One extra host and 'default0' passed to createModel: container indexes
// are shifted by 1 but must still match the TLD ids.
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts + 1);
VespaModel model = tester.createModel(services, true, 1, "default0");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
Map<String, ContentCluster> contentClusters = model.getContentClusters();
assertEquals(2, contentClusters.size());
checkThatTldAndContainerRunningOnSameHostHaveSameId(
model.getContainerClusters().values(),
model.getContentClusters().values(),
1);
}
}
/**
 * For every (content cluster, container cluster) pair, checks that each of the
 * two containers of a container cluster runs on the same host as its matching
 * TLD, and that the TLD/container config ids embed the same (possibly shifted)
 * container index.
 *
 * The TLD list is assumed to hold 2 TLDs per container cluster, laid out
 * consecutively per cluster — hence index 2 * i + j, where i is the container
 * cluster ordinal and j the container ordinal within it.
 */
private void checkThatTldAndContainerRunningOnSameHostHaveSameId(Collection<ContainerCluster> containerClusters,
Collection<ContentCluster> contentClusters,
int startIndexForContainerIds) {
for (ContentCluster contentCluster : contentClusters) {
String contentClusterName = contentCluster.getName();
int i = 0;
for (ContainerCluster containerCluster : containerClusters) {
String containerClusterName = containerCluster.getName();
for (int j = 0; j < 2; j++) {
Dispatch tld = contentCluster.getSearch().getIndexed().getTLDs().get(2 * i + j);
Container container = containerCluster.getContainers().get(j);
// Container ids may be shifted (e.g. when a host was replaced).
int containerConfigIdIndex = j + startIndexForContainerIds;
assertEquals(container.getHostName(), tld.getHostname());
assertEquals(contentClusterName + "/search/cluster." + contentClusterName + "/tlds/" +
containerClusterName + "." + containerConfigIdIndex + ".tld." + containerConfigIdIndex,
tld.getConfigId());
assertEquals(containerClusterName + "/" + "container." + containerConfigIdIndex,
container.getConfigId());
}
i++;
}
}
}
/**
 * Returns the JVM heap size, as a percentage of physical memory, configured
 * for the given container cluster's search subsystem.
 */
private int physicalMemoryPercentage(ContainerCluster cluster) {
QrStartConfig.Builder builder = new QrStartConfig.Builder();
cluster.getSearch().getConfig(builder);
QrStartConfig config = new QrStartConfig(builder);
return config.jvm().heapSizeAsPercentageOfPhysicalMemory();
}
// Proton config must be tuned from the node flavor: a flavor without fast disk
// results in a (low) disk write speed of 40 on every search node.
@Test
public void require_that_proton_config_is_tuned_based_on_node_flavor() {
String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
"<services>",
" <content version='1.0' id='test'>",
" <documents>",
" <document type='type1' mode='index'/>",
" </documents>",
" <nodes count='2' flavor='content-test-flavor'/>",
" </content>",
"</services>");
VespaModelTester tester = new VespaModelTester();
tester.addHosts(createFlavorFromDiskSetting("content-test-flavor", false), 2);
VespaModel model = tester.createModel(services, true, 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
assertEquals(2, cluster.getSearchNodes().size());
assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001);
assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001);
}
/** Creates a node flavor with the given name and fast-disk setting. */
private static Flavor createFlavorFromDiskSetting(String name, boolean fastDisk) {
FlavorsConfig.Flavor.Builder flavorBuilder = new FlavorsConfig.Flavor.Builder();
flavorBuilder.name(name).fastDisk(fastDisk);
return new Flavor(new FlavorsConfig.Flavor(flavorBuilder));
}
/**
 * Returns the resolved proton config for the search node at the given index
 * in the cluster; fails the test if the index is out of range.
 */
private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) {
List<SearchNode> nodes = cluster.getSearchNodes();
assertTrue(searchNodeIdx < nodes.size());
ProtonConfig.Builder configBuilder = new ProtonConfig.Builder();
nodes.get(searchNodeIdx).getConfig(configBuilder);
return new ProtonConfig(configBuilder);
}
// Precedence order for proton settings: explicit <config> override beats
// everything (maxtlssize=2000), explicit <tuning> beats flavor defaults
// (maxmemory=1000), while untouched values fall back to flavor-derived
// defaults (each().maxmemory() = 16 GB from the 128 GB flavor).
@Test
public void require_that_config_override_and_explicit_proton_tuning_have_precedence_over_default_node_flavor_tuning() {
String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
"<services>",
" <content version='1.0' id='test'>",
" <config name='vespa.config.search.core.proton'>",
" <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>",
" </config>",
" <documents>",
" <document type='type1' mode='index'/>",
" </documents>",
" <nodes count='1' flavor='content-test-flavor'/>",
" <engine>",
" <proton>",
" <tuning>",
" <searchnode>",
" <flushstrategy>",
" <native>",
" <total>",
" <maxmemorygain>1000</maxmemorygain>",
" </total>",
" </native>",
" </flushstrategy>",
" </searchnode>",
" </tuning>",
" </proton>",
" </engine>",
" </content>",
"</services>");
VespaModelTester tester = new VespaModelTester();
tester.addHosts("default", 1);
tester.addHosts(createFlavorFromMemoryAndDisk("content-test-flavor", 128, 100), 1);
VespaModel model = tester.createModel(services, true, 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId());
assertEquals(2000, cfg.flush().memory().maxtlssize());
assertEquals(1000, cfg.flush().memory().maxmemory());
assertEquals((long) 16 * GB, cfg.flush().memory().each().maxmemory());
}
/** Number of bytes in one gigabyte. Final: this is a constant, not mutable state. */
private static final long GB = 1024L * 1024 * 1024;
/** Creates a node flavor with the given name, minimum main memory (GB) and minimum disk (GB). */
private static Flavor createFlavorFromMemoryAndDisk(String name, int memoryGb, int diskGb) {
FlavorsConfig.Flavor.Builder flavorBuilder = new FlavorsConfig.Flavor.Builder();
flavorBuilder.name(name).minMainMemoryAvailableGb(memoryGb).minDiskAvailableGb(diskGb);
return new Flavor(new FlavorsConfig.Flavor(flavorBuilder));
}
/** Resolves the proton config the model produces for the given config id. */
private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
ProtonConfig.Builder configBuilder = new ProtonConfig.Builder();
model.getConfig(configBuilder, configId);
return new ProtonConfig(configBuilder);
}
} | class ModelProvisioningTest {
// Non-hosted node counts for jdisc clusters: container counts, config ids,
// per-cluster jvmargs/preload/allocated-memory attributes, and host lookup
// by hostname. 'mydisc' uses defaults; 'mydisc2' sets explicit attributes.
@Test
public void testNodeCountForJdisc() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>\n" +
"\n" +
"<admin version='3.0'><nodes count='1' /></admin>\n" +
"<jdisc id='mydisc' version='1.0'>" +
" <handler id='myHandler'>" +
" <component id='injected' />" +
" </handler>" +
" <nodes count=\"3\"/>" +
"</jdisc>" +
"<jdisc id='mydisc2' version='1.0'>" +
" <document-processing/>" +
" <handler id='myHandler'>" +
" <component id='injected' />" +
" </handler>" +
" <nodes count='2' allocated-memory='45%' jvmargs='-verbosegc' preload='lib/blablamalloc.so'/>" +
"</jdisc>" +
"</services>";
String hosts ="<hosts>"
+ " <host name='myhost0'>"
+ " <alias>node0</alias>"
+ " </host>"
+ " <host name='myhost1'>"
+ " <alias>node1</alias>"
+ " </host>"
+ " <host name='myhost2'>"
+ " <alias>node2</alias>"
+ " </host>"
+ " <host name='myhost3'>"
+ " <alias>node3</alias>"
+ " </host>"
+ " <host name='myhost4'>"
+ " <alias>node4</alias>"
+ " </host>"
+ " <host name='myhost5'>"
+ " <alias>node5</alias>"
+ " </host>"
+ "</hosts>";
VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services);
VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true)));
assertThat(model.getContainerClusters().get("mydisc").getContainers().size(), is(3));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getConfigId(), is("mydisc/container.0"));
assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(0).isInitialized());
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getConfigId(), is("mydisc/container.1"));
assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(1).isInitialized());
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getConfigId(), is("mydisc/container.2"));
assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(2).isInitialized());
assertThat(model.getContainerClusters().get("mydisc2").getContainers().size(), is(2));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getConfigId(), is("mydisc2/container.0"));
assertTrue(model.getContainerClusters().get("mydisc2").getContainers().get(0).isInitialized());
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getConfigId(), is("mydisc2/container.1"));
assertTrue(model.getContainerClusters().get("mydisc2").getContainers().get(1).isInitialized());
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getJvmArgs(), is(""));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getJvmArgs(), is(""));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getJvmArgs(), is(""));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(model.getContainerClusters().get("mydisc").getMemoryPercentage(), is(Optional.empty()));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getJvmArgs(), is("-verbosegc"));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getJvmArgs(), is("-verbosegc"));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getPreLoad(), is("lib/blablamalloc.so"));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getPreLoad(), is("lib/blablamalloc.so"));
assertThat(model.getContainerClusters().get("mydisc2").getMemoryPercentage(), is(Optional.of(45)));
HostSystem hostSystem = model.getHostSystem();
assertNotNull(hostSystem.getHostByHostname("myhost0"));
assertNotNull(hostSystem.getHostByHostname("myhost1"));
assertNotNull(hostSystem.getHostByHostname("myhost2"));
assertNotNull(hostSystem.getHostByHostname("myhost3"));
assertNull(hostSystem.getHostByHostname("Nope"));
}
// A content group with count='2' gets two nodes with sequential
// distribution keys starting at 0.
@Test
public void testNodeCountForContentGroup() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"\n" +
" <admin version='3.0'>" +
" <nodes count='3'/>" +
" </admin>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
int numberOfHosts = 2;
tester.addHosts(numberOfHosts);
int numberOfContentNodes = 2;
VespaModel model = tester.createModel(xmlWithNodes, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
final Map<String, ContentCluster> contentClusters = model.getContentClusters();
ContentCluster cluster = contentClusters.get("bar");
assertThat(cluster.getRootGroup().getNodes().size(), is(numberOfContentNodes));
int i = 0;
for (StorageNode node : cluster.getRootGroup().getNodes())
assertEquals(i++, node.getDistributionKey());
}
// Separate (non-combined) container and content clusters get their declared
// node counts, and the container keeps the full default heap percentage (60).
@Test
public void testSeparateClusters() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes count='1'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(3);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 1, model.getContainerClusters().get("container1").getContainers().size());
assertEquals("Heap size for container", 60, physicalMemoryPercentage(model.getContainerClusters().get("container1")));
}
// A provisioned host reports its cluster membership: type 'container'
// and the declaring cluster's id.
@Test
public void testClusterMembership() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes count='1'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(1, model.getHostSystem().getHosts().size());
HostResource host = model.getHostSystem().getHosts().iterator().next();
assertEquals(1, host.clusterMemberships().size());
ClusterMembership membership = host.clusterMemberships().iterator().next();
assertEquals("container", membership.cluster().type().name());
assertEquals("container1", membership.cluster().id().value());
}
// A combined cluster (<nodes of='content1'/>) puts a container on every
// content node and lowers the container heap percentage to 17.
@Test
public void testCombinedCluster() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes of='content1'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
assertEquals("Heap size is lowered with combined clusters",
17, physicalMemoryPercentage(model.getContainerClusters().get("container1")));
}
// jvmargs declared on a combined cluster's <nodes> tag must reach every
// container in the combined cluster.
@Test
public void testCombinedClusterWithJvmArgs() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <document-processing/>" +
" <nodes of='content1' jvmargs='testarg'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
for (Container container : model.getContainerClusters().get("container1").getContainers())
assertTrue(container.getJvmArgs().contains("testarg"));
}
// Two independent combined clusters: each container cluster tracks the node
// count of the content cluster it is combined with.
@Test
public void testMultipleCombinedClusters() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes of='content1'/>" +
" </container>" +
" <container version='1.0' id='container2'>" +
" <nodes of='content2'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
" <content version='1.0' id='content2'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='3'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(5);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
assertEquals("Nodes in content2", 3, model.getContentClusters().get("content2").getRootGroup().getNodes().size());
assertEquals("Nodes in container2", 3, model.getContainerClusters().get("container2").getContainers().size());
}
// Referencing a non-existing service in <nodes of='...'/> must fail with a
// descriptive IllegalArgumentException.
@Test
public void testNonExistingCombinedClusterReference() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes of='container2'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
try {
tester.createModel(xmlWithNodes, true);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("container cluster 'container1' references service 'container2' but this service is not defined", e.getMessage());
}
}
// <nodes of='...'/> may only reference content clusters; referencing another
// container cluster must fail with a descriptive IllegalArgumentException.
@Test
public void testInvalidCombinedClusterReference() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" +
" </container>" +
" <container version='1.0' id='container2'>" +
" <nodes count='2'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
try {
tester.createModel(xmlWithNodes, true);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("container cluster 'container1' references service 'container2', but that is not a content service", e.getMessage());
}
}
// count/groups attributes on <nodes>: 27 nodes in 9 groups of 3 ('bar') and
// 27 groups of 1 ('baz'). Verifies group structure, distribution keys,
// config ids, concrete host assignments, cluster-controller placement, and
// that slobroks/logserver are taken from the container nodes when hosted.
// The exact 'defaultNN' hostnames depend on the provisioner's deterministic
// allocation order, so the assertions below pin that order.
@Test
public void testUsingNodesAndGroupCountAttributes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='9'/>" +
" </content>" +
" <content version='1.0' id='baz'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='27'/>" +
" </content>" +
"</services>";
int numberOfHosts = 64;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals(1, model.getContainerClusters().size());
Set<com.yahoo.vespa.model.Host> containerHosts = model.getContainerClusters().get("foo").getContainers().stream().map(Container::getHost).collect(Collectors.toSet());
assertEquals(10, containerHosts.size());
Admin admin = model.getAdmin();
Set<com.yahoo.vespa.model.Host> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
assertTrue("Slobroks are assigned from container nodes", containerHosts.containsAll(slobrokHosts));
assertTrue("Logserver is assigned from container nodes", containerHosts.contains(admin.getLogserver().getHost()));
assertEquals("No in-cluster config servers in a hosted environment", 0, admin.getConfigservers().size());
assertEquals("No admin cluster controller when multitenant", null, admin.getClusterControllers());
// 'bar': 9 groups of 3 nodes, with sequential distribution keys across groups.
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default28", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default31", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default54", clusterControllers.getContainers().get(2).getHostName());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(9, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertEquals("default54", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getDistributionKey(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getConfigId(), is("bar/storage/2"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/3"));
assertEquals("default51", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(4));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/4"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getDistributionKey(), is(5));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getConfigId(), is("bar/storage/5"));
assertEquals("default48", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(8).getIndex(), is("8"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getDistributionKey(), is(24));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getConfigId(), is("bar/storage/24"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getDistributionKey(), is(25));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getConfigId(), is("bar/storage/25"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getDistributionKey(), is(26));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getConfigId(), is("bar/storage/26"));
// 'baz': 27 single-node groups.
cluster = model.getContentClusters().get("baz");
clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("baz-controllers", clusterControllers.getName());
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default02", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default27", clusterControllers.getContainers().get(2).getHostName());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(27, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("baz/storage/0"));
assertEquals("default27", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("baz/storage/1"));
assertEquals("default26", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertEquals("default25", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(26).getIndex(), is("26"));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getDistributionKey(), is(26));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getConfigId(), is("baz/storage/26"));
}
// groups == count (8 groups of 1 node): verifies group layout, an 8-bit
// distribution-bit count, and the provisioner's deterministic (reverse-order)
// host assignment for the single-node groups.
@Test
public void testGroupsOfSize1() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='8' groups='8'/>" +
" </content>" +
"</services>";
int numberOfHosts = 18;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default02", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default08", clusterControllers.getContainers().get(2).getHostName());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(8, cluster.getRootGroup().getSubgroups().size());
assertEquals(8, cluster.distributionBits());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertEquals("default08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/1"));
assertEquals("default07", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(7).getIndex(), is("7"));
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getDistributionKey(), is(7));
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getConfigId(), is("bar/storage/7"));
assertEquals("default01", cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getHostName());
}
/**
 * Content cluster with explicit non-dedicated controllers
 * (&lt;controllers dedicated='false' count='6'/&gt;): controllers are placed on the
 * cluster's own nodes, and the even requested count is reduced to the closest
 * odd number (6 -> 5, per the assertion message).
 */
@Test
public void testExplicitNonDedicatedClusterControllers() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes dedicated='false' count='6'/></controllers>" +
"     <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 19; // 10 container + 9 content
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals( 8, cluster.distributionBits());
assertEquals("We get the closest odd number", 5, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
// Controllers spread across the cluster's hosts
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default02", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default04", clusterControllers.getContainers().get(2).getHostName());
assertEquals("default05", clusterControllers.getContainers().get(3).getHostName());
assertEquals("default07", clusterControllers.getContainers().get(4).getHostName());
assertEquals("default09", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertEquals("default08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getHostName());
assertEquals("default06", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertEquals("default03", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
}
/**
 * No explicit &lt;controllers&gt; element: a default cluster controller cluster of
 * the closest odd size (3) is created on the content cluster's nodes.
 */
@Test
public void testClusterControllersWithGroupSize2() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='8' groups='4'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 18; // 10 container + 8 content
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals("We get the closest odd number", 3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default03", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default08", clusterControllers.getContainers().get(2).getHostName());
}
/**
 * dedicated='false' controllers on a 2-node content cluster, with two extra
 * container clusters available for supplementing the controller cluster.
 * NOTE(review): count='5' is requested but only 1 controller is asserted —
 * presumably capped by the eligible hosts on this small host set; confirm
 * against the controller allocation logic before relying on this number.
 */
@Test
public void testClusterControllersCanSupplementWithAllContainerClusters() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo1'>" +
"     <nodes count='2'/>" +
"  </container>" +
"  <container version='1.0' id='foo2'>" +
"     <nodes count='1'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes dedicated='false' count='5'/></controllers>" +
"     <nodes count='2'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 5;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(1, clusterControllers.getContainers().size());
}
/**
 * Nodes marked retired (default09, default06, default03 below) must be skipped
 * when placing cluster controllers; the next non-retired hosts are used instead.
 */
@Test
public void testClusterControllersAreNotPlacedOnRetiredNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 19;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// Trailing varargs are the hostnames to mark as retired
VespaModel model = tester.createModel(services, true, "default09", "default06", "default03");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("Skipping retired default09", "default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals("Skipping retired default03", "default04", clusterControllers.getContainers().get(1).getHostName());
assertEquals("Skipping retired default06", "default08", clusterControllers.getContainers().get(2).getHostName());
}
/**
 * The slobrok cluster keeps running on retired nodes: the 3 regular slobroks are
 * placed on non-retired hosts, and the retired node is included in addition.
 */
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 10;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true, "default09"); // default09 retired
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals("Includes retired node", 1+3, model.getAdmin().getSlobroks().size());
assertEquals("default01", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("default02", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("default10", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("Included in addition because it is retired", "default09", model.getAdmin().getSlobroks().get(3).getHostName());
}
/**
 * Same as the test above but with two retired nodes; the retired slobroks are
 * appended after the 3 regular ones, in hostname order.
 */
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 10;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true, "default09", "default08"); // retired hosts
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals("Includes retired node", 3+2, model.getAdmin().getSlobroks().size());
assertEquals("default01", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("default02", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("default10", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("Included in addition because it is retired", "default08", model.getAdmin().getSlobroks().get(3).getHostName());
assertEquals("Included in addition because it is retired", "default09", model.getAdmin().getSlobroks().get(4).getHostName());
}
/**
 * Slobroks are distributed over both container clusters ('foo' and 'bar');
 * retired hosts (default12, default03, default02) still run slobrok in addition
 * to the regular ones.
 */
@Test
public void testSlobroksAreSpreadOverAllContainerClusters() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <container version='1.0' id='bar'>" +
"     <nodes count='3'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 13;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true, "default12", "default03", "default02");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals("Includes retired node", 3+3, model.getAdmin().getSlobroks().size());
assertEquals("default04", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("default13", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("Included in addition because it is retired", "default12", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("default01", model.getAdmin().getSlobroks().get(3).getHostName());
assertEquals("Included in addition because it is retired", "default02", model.getAdmin().getSlobroks().get(4).getHostName());
assertEquals("Included in addition because it is retired", "default03", model.getAdmin().getSlobroks().get(5).getHostName());
}
/**
 * In the hosted-vespa 'routing' application, slobroks must be placed only on the
 * routing container cluster's hosts — never on the node-admin cluster's hosts.
 */
@Test
public void testSlobroksAreSpreadOverAllContainerClustersExceptNodeAdmin() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='routing'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <container version='1.0' id='node-admin'>" +
"     <nodes count='3'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 13;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// Makes the model build as the hosted-vespa routing application
tester.setApplicationId("hosted-vespa", "routing", "default");
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
Set<String> routingHosts = getClusterHostnames(model, "routing");
assertEquals(10, routingHosts.size());
Set<String> nodeAdminHosts = getClusterHostnames(model, "node-admin");
assertEquals(3, nodeAdminHosts.size());
Set<String> slobrokHosts = model.getAdmin().getSlobroks().stream()
.map(AbstractService::getHostName)
.collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
// Every slobrok is on a routing host and none is on a node-admin host
assertThat(slobrokHosts, everyItem(isIn(routingHosts)));
assertThat(slobrokHosts, everyItem(not(isIn(nodeAdminHosts))));
}
/**
 * Returns the hostnames of all hosts in the model that run at least one service
 * whose "clustername" property equals the given cluster id.
 */
private Set<String> getClusterHostnames(VespaModel model, String clusterId) {
Optional<String> wantedCluster = Optional.of(clusterId);
return model.getHosts().stream()
.filter(host -> host.getServices().stream()
.map(serviceInfo -> serviceInfo.getProperty("clustername"))
.anyMatch(clusterName -> Objects.equals(clusterName, wantedCluster)))
.map(HostInfo::getHostname)
.collect(Collectors.toSet());
}
/** A 2-node content cluster with no other clusters gets a single cluster controller. */
@Test
public void test2ContentNodesProduces1ClusterController() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 2;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(1, clusterControllers.getContainers().size());
}
/**
 * A 2-node content cluster next to a container cluster: the resulting controller
 * cluster has one container here.
 */
@Test
public void test2ContentNodesWithContainerClusterProducesMixedClusterControllerCluster() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='3'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2'/>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(5);
VespaModel model = tester.createModel(services, true);
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(1, clusterControllers.getContainers().size());
}
/**
 * Two 2-node content clusters plus a container cluster: each content cluster
 * should get its own mixed controller cluster, supplemented by DIFFERENT
 * container nodes (see last assertion).
 * NOTE(review): this test asserts clusterControllers1 has 1 container but then
 * reads indices 1 and 2 — internally inconsistent, presumably why it is
 * {@code @Ignore}d. Reconcile the expected size (likely 3) before enabling.
 */
@Ignore
@Test
public void test2ContentNodesOn2ClustersWithContainerClusterProducesMixedClusterControllerCluster() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container version='1.0' id='container'>" +
"     <nodes count='3' flavor='container-node'/>" +
"  </container>" +
"  <content version='1.0' id='content1'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2' flavor='content1-node'/>" +
"  </content>" +
"  <content version='1.0' id='content2'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2' flavor='content2-node'/>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts("container-node", 3);
tester.addHosts("content1-node", 2);
tester.addHosts("content2-node", 2);
VespaModel model = tester.createModel(services, true);
ContentCluster cluster1 = model.getContentClusters().get("content1");
ContainerCluster clusterControllers1 = cluster1.getClusterControllers();
assertEquals(1, clusterControllers1.getContainers().size());
assertEquals("content1-node0", clusterControllers1.getContainers().get(0).getHostName());
assertEquals("content1-node1", clusterControllers1.getContainers().get(1).getHostName());
assertEquals("container-node0", clusterControllers1.getContainers().get(2).getHostName());
ContentCluster cluster2 = model.getContentClusters().get("content2");
ContainerCluster clusterControllers2 = cluster2.getClusterControllers();
assertEquals(3, clusterControllers2.getContainers().size());
assertEquals("content2-node0", clusterControllers2.getContainers().get(0).getHostName());
assertEquals("content2-node1", clusterControllers2.getContainers().get(1).getHostName());
assertEquals("We do not pick the container used to supplement another cluster",
"container-node1", clusterControllers2.getContainers().get(2).getHostName());
}
/**
 * &lt;controllers dedicated='true' count='4'/&gt;: exactly 4 controllers are created
 * on hosts of their own (hence 23 = 10 + 9 + 4 hosts), here allocated on
 * default04..default01.
 */
@Test
public void testExplicitDedicatedClusterControllers() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes dedicated='true' count='4'/></controllers>" +
"     <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 23;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(4, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default04", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default03", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default02", clusterControllers.getContainers().get(2).getHostName());
assertEquals("default01", clusterControllers.getContainers().get(3).getHostName());
}
/**
 * 24 nodes in 3 groups are requested but only 6 hosts exist (non-required, so
 * the model still builds): the 3-group structure is kept with 2 nodes per group,
 * and redundancy/ready copies are scaled down accordingly (2 per group * 3 groups).
 */
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='3'>4</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24' groups='3'/>" +
"     <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
"  </content>" +
"</services>";
int numberOfHosts = 6;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false); // false: do not fail on too few hosts
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(2*3, cluster.redundancy().effectiveReadyCopies());
assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(3, cluster.getRootGroup().getSubgroups().size());
// Each of the 3 groups gets 2 of the 6 available nodes
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/2"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/3"));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getIndex(), is("2"));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().size(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getDistributionKey(), is(4));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getConfigId(), is("bar/storage/4"));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getDistributionKey(), is(5));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getConfigId(), is("bar/storage/5"));
}
/**
 * A flat (ungrouped) cluster asking for 24 nodes on only 4 hosts: redundancy,
 * ready copies and dispatch groups are all clamped to the 4 available nodes.
 */
@Test
public void testUsingNodesCountAttributesAndGettingTooFewNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='8'>12</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24'/>" +
"     <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
"     <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
"  </content>" +
"</services>";
int numberOfHosts = 4;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false); // false: do not fail on too few hosts
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(4, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(4, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(4, cluster.redundancy().effectiveReadyCopies());
assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(4, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getNodes().size(), is(4));
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertThat(cluster.getRootGroup().getNodes().get(1).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getNodes().get(1).getConfigId(), is("bar/storage/1"));
assertThat(cluster.getRootGroup().getNodes().get(2).getDistributionKey(), is(2));
assertThat(cluster.getRootGroup().getNodes().get(2).getConfigId(), is("bar/storage/2"));
assertThat(cluster.getRootGroup().getNodes().get(3).getDistributionKey(), is(3));
assertThat(cluster.getRootGroup().getNodes().get(3).getConfigId(), is("bar/storage/3"));
}
/**
 * 24 nodes in 3 groups requested but only a single host is available: the whole
 * cluster collapses to one ungrouped node with redundancy 1 and one controller.
 */
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='3'>4</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24' groups='3'/>" +
"     <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
"  </content>" +
"</services>";
int numberOfHosts = 1;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false); // false: do not fail on too few hosts
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(1, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(1, cluster.redundancy().effectiveReadyCopies());
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(1, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0"));
}
/**
 * With required='true', asking for more nodes (3) than hosts exist (2) must fail
 * instead of silently scaling down.
 */
@Test(expected = IllegalArgumentException.class)
public void testRequiringMoreNodesThanAreAvailable() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>1</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='3' required='true'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 2;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
tester.createModel(services, false); // expected to throw IllegalArgumentException
}
/**
 * A flat cluster asking for 24 nodes on a single host: redundancy, ready copies
 * and dispatch groups are all clamped to 1.
 */
@Test
public void testUsingNodesCountAttributesAndGettingJustOneNode() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='8'>12</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24'/>" +
"     <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
"     <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
"  </content>" +
"</services>";
int numberOfHosts = 1;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false); // false: do not fail on too few hosts
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(1, cluster.redundancy().effectiveReadyCopies());
assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(1, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0"));
}
/**
 * Every service type requests a specific node flavor; the tester is primed with
 * exactly the matching flavored hosts and all 23 of them must be used.
 */
@Test
public void testRequestingSpecificFlavors() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'>" +
"    <logservers><nodes count='1' dedicated='true' flavor='logserver-flavor'/></logservers>" +
"    <slobroks><nodes count='2' dedicated='true' flavor='slobrok-flavor'/></slobroks>" +
"  </admin>" +
"  <container version='1.0' id='container'>" +
"     <nodes count='4' flavor='container-flavor'/>" +
"  </container>" +
"  <content version='1.0' id='foo'>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes count='2' dedicated='true' flavor='controller-foo-flavor'/></controllers>" +
"     <nodes count='5' flavor='content-foo-flavor'/>" +
"  </content>" +
"  <content version='1.0' id='bar'>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes count='3' dedicated='true' flavor='controller-bar-flavor'/></controllers>" +
"     <nodes count='6' flavor='content-bar-flavor'/>" +
"  </content>" +
"</services>";
int totalHosts = 23; // 1 + 2 + 4 + 2 + 5 + 3 + 6
VespaModelTester tester = new VespaModelTester();
tester.addHosts("logserver-flavor", 1);
tester.addHosts("slobrok-flavor", 2);
tester.addHosts("container-flavor", 4);
tester.addHosts("controller-foo-flavor", 2);
tester.addHosts("content-foo-flavor", 5);
tester.addHosts("controller-bar-flavor", 3);
tester.addHosts("content-bar-flavor", 6);
VespaModel model = tester.createModel(services, true, 0);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(totalHosts));
}
/**
 * A services file containing only a jdisc cluster: 3 containers are created,
 * plus the implicit admin services (logserver and 3 slobroks).
 */
@Test
public void testJDiscOnly() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<jdisc version='1.0'>" +
"  <search/>" +
"  <nodes count='3'/>" +
"</jdisc>";
int numberOfHosts = 3;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertEquals(numberOfHosts, model.getRoot().getHostSystem().getHosts().size());
assertEquals(3, model.getContainerClusters().get("jdisc").getContainers().size());
assertNotNull(model.getAdmin().getLogserver());
assertEquals(3, model.getAdmin().getSlobroks().size());
}
/**
 * Hostalias-style node references still work when a provisioner is in use:
 * everything ends up on the single provisioned host.
 */
@Test
public void testUsingHostaliasWithProvisioner() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"<admin version='2.0'>" +
"  <adminserver hostalias='node1'/>\n"+
"</admin>\n" +
"<jdisc id='mydisc' version='1.0'>" +
"  <handler id='myHandler'>" +
"    <component id='injected' />" +
"  </handler>" +
"  <nodes>" +
"    <node hostalias='node1'/>" +
"  </nodes>" +
"</jdisc>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
}
/**
 * Standalone-container syntax (top-level jdisc with an explicit http server on
 * the default Vespa web service port) is also accepted on hosted Vespa.
 */
@Test
public void testThatStandaloneSyntaxWorksOnHostedVespa() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<jdisc id='foo' version='1.0'>" +
"  <http>" +
"    <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" +
"  </http>" +
"</jdisc>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertThat(model.getHosts().size(), is(1));
assertThat(model.getContainerClusters().size(), is(1));
}
/**
 * Clusters without a &lt;nodes&gt; tag default to a single node each, for both the
 * container and the content cluster.
 */
@Test
public void testNoNodeTagMeans1Node() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <jdisc id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"  </jdisc>" +
"  <content version='1.0' id='bar'>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes());
}
/** Same default-to-1-node behavior when only a container cluster is declared. */
@Test
public void testNoNodeTagMeans1NodeNoContent() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <jdisc id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"  </jdisc>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
}
/** The no-&lt;nodes&gt;-tag default of 1 node also applies in the non-hosted case. */
@Test
public void testNoNodeTagMeans1NodeNonHosted() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <jdisc id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"  </jdisc>" +
"  <content version='1.0' id='bar'>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(false);
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size());
}
/**
 * Non-hosted single-node setup with explicit hostalias references: both the
 * container and the content cluster share the one host.
 */
@Test
public void testSingleNodeNonHosted() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <jdisc id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"    <nodes><node hostalias='foo'/></nodes>"+
"  </jdisc>" +
"  <content version='1.0' id='bar'>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(false);
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes());
}
/**
 * Recreate the combination used in some factory tests: a multitenant but
 * non-hosted model where one host carries the admin server, a jdisc cluster
 * and a 2-node content group (both content nodes on the same hostalias).
 */
@Test
public void testMultitenantButNotHosted() {
String services =
"<?xml version='1.0' encoding='UTF-8' ?>" +
"<services version='1.0'>" +
"  <admin version='2.0'>" +
"    <adminserver hostalias='node1'/>" +
"  </admin>" +
"  <jdisc id='default' version='1.0'>" +
"    <search/>" +
"    <nodes>" +
"      <node hostalias='node1'/>" +
"    </nodes>" +
"  </jdisc>" +
"  <content id='storage' version='1.0'>" +
"    <redundancy>2</redundancy>" +
"    <group>" +
"      <node distribution-key='0' hostalias='node1'/>" +
"      <node distribution-key='1' hostalias='node1'/>" +
"    </group>" +
"    <tuning>" +
"      <cluster-controller>" +
"        <transition-time>0</transition-time>" +
"      </cluster-controller>" +
"    </tuning>" +
"    <documents>" +
"      <document mode='store-only' type='type1'/>" +
"    </documents>" +
"    <engine>" +
"      <proton/>" +
"    </engine>" +
"  </content>" +
" </services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(1));
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(2, content.getRootGroup().getNodes().size());
ContainerCluster controller = content.getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
/**
 * Verifies that, in a non-hosted setup with an explicit hosts file, the same three
 * hosts can be shared by the container cluster and the content cluster.
 */
// Fix: the annotation appeared twice (@Test @Test). @Test is not @Repeatable,
// so the duplicate is a compile error; exactly one occurrence is kept.
@Test
public void testSharedNodesNotHosted() {
    String hosts =
            "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
            "<hosts>\n" +
            " <host name=\"vespa-1\">\n" +
            " <alias>vespa-1</alias>\n" +
            " </host>\n" +
            " <host name=\"vespa-2\">\n" +
            " <alias>vespa-2</alias>\n" +
            " </host>\n" +
            " <host name=\"vespa-3\">\n" +
            " <alias>vespa-3</alias>\n" +
            " </host>\n" +
            "</hosts>";
    String services =
            "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
            "<services version=\"1.0\">\n" +
            "\n" +
            " <admin version=\"2.0\">\n" +
            " <adminserver hostalias=\"vespa-1\"/>\n" +
            " <configservers>\n" +
            " <configserver hostalias=\"vespa-1\"/>\n" +
            " </configservers>\n" +
            " </admin>\n" +
            "\n" +
            " <container id=\"container\" version=\"1.0\">\n" +
            " <document-processing/>\n" +
            " <document-api/>\n" +
            " <search/>\n" +
            " <nodes jvmargs=\"-Xms512m -Xmx512m\">\n" +
            " <node hostalias=\"vespa-1\"/>\n" +
            " <node hostalias=\"vespa-2\"/>\n" +
            " <node hostalias=\"vespa-3\"/>\n" +
            " </nodes>\n" +
            " </container>\n" +
            "\n" +
            " <content id=\"storage\" version=\"1.0\">\n" +
            " <search>\n" +
            " <visibility-delay>1.0</visibility-delay>\n" +
            " </search>\n" +
            " <redundancy>2</redundancy>\n" +
            " <documents>\n" +
            " <document type=\"type1\" mode=\"index\"/>\n" +
            " <document-processing cluster=\"container\"/>\n" +
            " </documents>\n" +
            " <nodes>\n" +
            " <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
            " <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" +
            " <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" +
            " </nodes>\n" +
            " </content>\n" +
            "\n" +
            "</services>";
    VespaModel model = createNonProvisionedModel(false, hosts, services);
    // All three declared hosts are used, and the content cluster places one node on each.
    assertEquals(3, model.getRoot().getHostSystem().getHosts().size());
    ContentCluster content = model.getContentClusters().get("storage");
    assertEquals(3, content.getRootGroup().getNodes().size());
}
// Multitenant (non-hosted) model where two content clusters ('storage' and
// 'search') are declared on the same single host. Asserts that the model still
// builds, that the 'storage' cluster gets both of its nodes, and that it gets
// one cluster-controller container.
@Test
public void testMultitenantButNotHostedSharedContentNode() {
String services =
"<?xml version='1.0' encoding='UTF-8' ?>" +
"<services version='1.0'>" +
" <admin version='2.0'>" +
" <adminserver hostalias='node1'/>" +
" </admin>" +
" <jdisc id='default' version='1.0'>" +
" <search/>" +
" <nodes>" +
" <node hostalias='node1'/>" +
" </nodes>" +
" </jdisc>" +
" <content id='storage' version='1.0'>" +
" <redundancy>2</redundancy>" +
" <group>" +
" <node distribution-key='0' hostalias='node1'/>" +
" <node distribution-key='1' hostalias='node1'/>" +
" </group>" +
" <tuning>" +
" <cluster-controller>" +
" <transition-time>0</transition-time>" +
" </cluster-controller>" +
" </tuning>" +
" <documents>" +
" <document mode='store-only' type='type1'/>" +
" </documents>" +
" <engine>" +
" <proton/>" +
" </engine>" +
" </content>" +
// Second content cluster sharing the same host alias as 'storage'.
" <content id='search' version='1.0'>" +
" <redundancy>2</redundancy>" +
" <group>" +
" <node distribution-key='0' hostalias='node1'/>" +
" </group>" +
" <documents>" +
" <document type='type1'/>" +
" </documents>" +
" </content>" +
" </services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
// Everything is mapped onto the one declared host.
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(1));
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(2, content.getRootGroup().getNodes().size());
ContainerCluster controller = content.getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
/** Builds a multitenant model from services XML only (no hosts file, no provisioner). */
private VespaModel createNonProvisionedMultitenantModel(String services) {
    boolean multitenant = true;
    return createNonProvisionedModel(multitenant, null, services);
}
/**
 * Builds a model from explicit hosts/services XML with a mock application package,
 * bypassing host provisioning. A 'type1' search definition is generated for the package.
 */
private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) {
    VespaModelCreatorWithMockPkg creator =
            new VespaModelCreatorWithMockPkg(hosts, services, ApplicationPackageUtils.generateSearchDefinition("type1"));
    DeployState deployState = new DeployState.Builder()
            .applicationPackage(creator.appPkg)
            .properties(new DeployProperties.Builder().multitenant(multitenant).build())
            .build(true);
    return creator.create(false, deployState);
}
// Verifies that TLD config ids are deterministic: a TLD and the container running
// on the same host must get matching indices, both for a fresh allocation and when
// one host ("default0") is excluded — see VespaModelTester.createModel for the
// meaning of the extra arguments (assumption: they mark a retired/failed host; confirm there).
@Test
public void testThatTldConfigIdsAreDeterministic() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <jdisc version='1.0' id='jdisc0'>" +
" <search/>" +
" <nodes count='2'/>" +
" </jdisc>" +
" <jdisc version='1.0' id='jdisc1'>" +
" <search/>" +
" <nodes count='2'/>" +
" </jdisc>" +
" <content version='1.0' id='content0'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
int numberOfHosts = 8;
// Case 1: exact number of hosts available; container ids start at 0.
{
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
Map<String, ContentCluster> contentClusters = model.getContentClusters();
assertEquals(2, contentClusters.size());
checkThatTldAndContainerRunningOnSameHostHaveSameId(
model.getContainerClusters().values(),
model.getContentClusters().values(),
0);
}
// Case 2: one spare host and "default0" passed to createModel; container ids start at 1.
{
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts + 1);
VespaModel model = tester.createModel(services, true, 1, "default0");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
Map<String, ContentCluster> contentClusters = model.getContentClusters();
assertEquals(2, contentClusters.size());
checkThatTldAndContainerRunningOnSameHostHaveSameId(
model.getContainerClusters().values(),
model.getContentClusters().values(),
1);
}
}
/**
 * Asserts that every TLD of each content cluster runs on the same host as the
 * corresponding container, and that their config ids embed the same index.
 * Assumes each container cluster has exactly two containers (the inner j-loop),
 * and that the TLD list is ordered container-cluster by container-cluster
 * (hence index 2 * i + j) — TODO confirm against IndexedSearchCluster.getTLDs().
 */
private void checkThatTldAndContainerRunningOnSameHostHaveSameId(Collection<ContainerCluster> containerClusters,
Collection<ContentCluster> contentClusters,
int startIndexForContainerIds) {
for (ContentCluster contentCluster : contentClusters) {
String contentClusterName = contentCluster.getName();
// i counts container clusters; each contributes two consecutive TLDs.
int i = 0;
for (ContainerCluster containerCluster : containerClusters) {
String containerClusterName = containerCluster.getName();
for (int j = 0; j < 2; j++) {
Dispatch tld = contentCluster.getSearch().getIndexed().getTLDs().get(2 * i + j);
Container container = containerCluster.getContainers().get(j);
// Container config ids may be offset, e.g. when a host was excluded.
int containerConfigIdIndex = j + startIndexForContainerIds;
// TLD must be co-located with the container it belongs to.
assertEquals(container.getHostName(), tld.getHostname());
assertEquals(contentClusterName + "/search/cluster." + contentClusterName + "/tlds/" +
containerClusterName + "." + containerConfigIdIndex + ".tld." + containerConfigIdIndex,
tld.getConfigId());
assertEquals(containerClusterName + "/" + "container." + containerConfigIdIndex,
container.getConfigId());
}
i++;
}
}
}
/** Returns the JVM heap size, as a percentage of physical memory, configured for the given cluster. */
private int physicalMemoryPercentage(ContainerCluster cluster) {
    QrStartConfig.Builder builder = new QrStartConfig.Builder();
    cluster.getSearch().getConfig(builder);
    QrStartConfig config = new QrStartConfig(builder);
    return config.jvm().heapSizeAsPercentageOfPhysicalMemory();
}
// Verifies that proton config is derived from the node flavor: a flavor created
// with fastDisk=false yields a disk writespeed of 40 on both search nodes.
@Test
public void require_that_proton_config_is_tuned_based_on_node_flavor() {
String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
"<services>",
" <content version='1.0' id='test'>",
" <documents>",
" <document type='type1' mode='index'/>",
" </documents>",
" <nodes count='2' flavor='content-test-flavor'/>",
" </content>",
"</services>");
VespaModelTester tester = new VespaModelTester();
// Two hosts with a slow-disk flavor (fastDisk = false).
tester.addHosts(createFlavorFromDiskSetting("content-test-flavor", false), 2);
VespaModel model = tester.createModel(services, true, 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
assertEquals(2, cluster.getSearchNodes().size());
assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001);
assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001);
}
/** Creates a node flavor with the given name and fast/slow disk setting. */
private static Flavor createFlavorFromDiskSetting(String name, boolean fastDisk) {
    FlavorsConfig.Flavor.Builder builder = new FlavorsConfig.Flavor.Builder();
    builder.name(name);
    builder.fastDisk(fastDisk);
    return new Flavor(new FlavorsConfig.Flavor(builder));
}
/** Returns the proton config produced by the search node at the given index of the cluster. */
private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) {
    List<SearchNode> nodes = cluster.getSearchNodes();
    assertTrue(searchNodeIdx < nodes.size());
    ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder();
    nodes.get(searchNodeIdx).getConfig(protonBuilder);
    return new ProtonConfig(protonBuilder);
}
// Verifies precedence of proton settings: an explicit <config> override wins
// (maxtlssize = 2000), explicit <tuning> wins (maxmemorygain 1000 ends up as
// flush memory maxmemory), while per-document maxmemory (16 GB) is still derived
// from the 128 GB flavor memory.
@Test
public void require_that_config_override_and_explicit_proton_tuning_have_precedence_over_default_node_flavor_tuning() {
String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
"<services>",
" <content version='1.0' id='test'>",
" <config name='vespa.config.search.core.proton'>",
" <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>",
" </config>",
" <documents>",
" <document type='type1' mode='index'/>",
" </documents>",
" <nodes count='1' flavor='content-test-flavor'/>",
" <engine>",
" <proton>",
" <tuning>",
" <searchnode>",
" <flushstrategy>",
" <native>",
" <total>",
" <maxmemorygain>1000</maxmemorygain>",
" </total>",
" </native>",
" </flushstrategy>",
" </searchnode>",
" </tuning>",
" </proton>",
" </engine>",
" </content>",
"</services>");
VespaModelTester tester = new VespaModelTester();
tester.addHosts("default", 1);
// Flavor with 128 GB memory and 100 GB disk for the single content node.
tester.addHosts(createFlavorFromMemoryAndDisk("content-test-flavor", 128, 100), 1);
VespaModel model = tester.createModel(services, true, 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId());
assertEquals(2000, cfg.flush().memory().maxtlssize());
assertEquals(1000, cfg.flush().memory().maxmemory());
assertEquals((long) 16 * GB, cfg.flush().memory().each().maxmemory());
}
private static long GB = 1024 * 1024 * 1024;
/** Creates a node flavor with the given name, main memory (GB) and disk space (GB). */
private static Flavor createFlavorFromMemoryAndDisk(String name, int memoryGb, int diskGb) {
    FlavorsConfig.Flavor.Builder builder = new FlavorsConfig.Flavor.Builder();
    builder.name(name);
    builder.minMainMemoryAvailableGb(memoryGb);
    builder.minDiskAvailableGb(diskGb);
    return new Flavor(new FlavorsConfig.Flavor(builder));
}
/** Returns the proton config the model serves for the given config id. */
private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
    ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder();
    model.getConfig(protonBuilder, configId);
    return new ProtonConfig(protonBuilder);
}
} |
Fixed - thanks! | public DocprocChains getDocprocChains() {
if (containerDocproc == null)
throw new IllegalStateException("Search components not found in container cluster '" + getSubId() +
"': Add <document-processing/> to the cluster in services.xml");
return containerDocproc.getChains();
} | throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + | public DocprocChains getDocprocChains() {
if (containerDocproc == null)
throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() +
"': Add <document-processing/> to the cluster in services.xml");
return containerDocproc.getChains();
} | class is not accessible
ProcessingHandler<?> processingHandler = new ProcessingHandler<>(
processingChains,
"com.yahoo.processing.handler.ProcessingHandler");
for (String binding: serverBindings)
processingHandler.addServerBindings(binding);
addComponent(processingHandler);
}
public ProcessingChains getProcessingChains() {
return processingChains;
} | class is not accessible
ProcessingHandler<?> processingHandler = new ProcessingHandler<>(
processingChains,
"com.yahoo.processing.handler.ProcessingHandler");
for (String binding: serverBindings)
processingHandler.addServerBindings(binding);
addComponent(processingHandler);
}
public ProcessingChains getProcessingChains() {
return processingChains;
} |
Using the joinLines() helper, you wouldn't have to type all those newlines. | public void testSharedNodesNotHosted() {
String hosts =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<hosts>\n" +
" <host name=\"vespa-1\">\n" +
" <alias>vespa-1</alias>\n" +
" </host>\n" +
" <host name=\"vespa-2\">\n" +
" <alias>vespa-2</alias>\n" +
" </host>\n" +
" <host name=\"vespa-3\">\n" +
" <alias>vespa-3</alias>\n" +
" </host>\n" +
"</hosts>";
String services =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<services version=\"1.0\">\n" +
"\n" +
" <admin version=\"2.0\">\n" +
" <adminserver hostalias=\"vespa-1\"/>\n" +
" <configservers>\n" +
" <configserver hostalias=\"vespa-1\"/>\n" +
" </configservers>\n" +
" </admin>\n" +
"\n" +
" <container id=\"container\" version=\"1.0\">\n" +
" <document-processing/>\n" +
" <document-api/>\n" +
" <search/>\n" +
" <nodes jvmargs=\"-Xms512m -Xmx512m\">\n" +
" <node hostalias=\"vespa-1\"/>\n" +
" <node hostalias=\"vespa-2\"/>\n" +
" <node hostalias=\"vespa-3\"/>\n" +
" </nodes>\n" +
" </container>\n" +
"\n" +
" <content id=\"storage\" version=\"1.0\">\n" +
" <search>\n" +
" <visibility-delay>1.0</visibility-delay>\n" +
" </search>\n" +
" <redundancy>2</redundancy>\n" +
" <documents>\n" +
" <document type=\"type1\" mode=\"index\"/>\n" +
" <document-processing cluster=\"container\"/>\n" +
" </documents>\n" +
" <nodes>\n" +
" <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
" <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" +
" <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" +
" </nodes>\n" +
" </content>\n" +
"\n" +
"</services>";
VespaModel model = createNonProvisionedModel(false, hosts, services);
assertEquals(3, model.getRoot().getHostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(3, content.getRootGroup().getNodes().size());
} | "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + | public void testSharedNodesNotHosted() {
String hosts =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<hosts>\n" +
" <host name=\"vespa-1\">\n" +
" <alias>vespa-1</alias>\n" +
" </host>\n" +
" <host name=\"vespa-2\">\n" +
" <alias>vespa-2</alias>\n" +
" </host>\n" +
" <host name=\"vespa-3\">\n" +
" <alias>vespa-3</alias>\n" +
" </host>\n" +
"</hosts>";
String services =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<services version=\"1.0\">\n" +
"\n" +
" <admin version=\"2.0\">\n" +
" <adminserver hostalias=\"vespa-1\"/>\n" +
" <configservers>\n" +
" <configserver hostalias=\"vespa-1\"/>\n" +
" </configservers>\n" +
" </admin>\n" +
"\n" +
" <container id=\"container\" version=\"1.0\">\n" +
" <document-processing/>\n" +
" <document-api/>\n" +
" <search/>\n" +
" <nodes jvmargs=\"-Xms512m -Xmx512m\">\n" +
" <node hostalias=\"vespa-1\"/>\n" +
" <node hostalias=\"vespa-2\"/>\n" +
" <node hostalias=\"vespa-3\"/>\n" +
" </nodes>\n" +
" </container>\n" +
"\n" +
" <content id=\"storage\" version=\"1.0\">\n" +
" <search>\n" +
" <visibility-delay>1.0</visibility-delay>\n" +
" </search>\n" +
" <redundancy>2</redundancy>\n" +
" <documents>\n" +
" <document type=\"type1\" mode=\"index\"/>\n" +
" <document-processing cluster=\"container\"/>\n" +
" </documents>\n" +
" <nodes>\n" +
" <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
" <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" +
" <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" +
" </nodes>\n" +
" </content>\n" +
"\n" +
"</services>";
VespaModel model = createNonProvisionedModel(false, hosts, services);
assertEquals(3, model.getRoot().getHostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(3, content.getRootGroup().getNodes().size());
} | class ModelProvisioningTest {
@Test
public void testNodeCountForJdisc() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>\n" +
"\n" +
"<admin version='3.0'><nodes count='1' /></admin>\n" +
"<jdisc id='mydisc' version='1.0'>" +
" <handler id='myHandler'>" +
" <component id='injected' />" +
" </handler>" +
" <nodes count=\"3\"/>" +
"</jdisc>" +
"<jdisc id='mydisc2' version='1.0'>" +
" <document-processing/>" +
" <handler id='myHandler'>" +
" <component id='injected' />" +
" </handler>" +
" <nodes count='2' allocated-memory='45%' jvmargs='-verbosegc' preload='lib/blablamalloc.so'/>" +
"</jdisc>" +
"</services>";
String hosts ="<hosts>"
+ " <host name='myhost0'>"
+ " <alias>node0</alias>"
+ " </host>"
+ " <host name='myhost1'>"
+ " <alias>node1</alias>"
+ " </host>"
+ " <host name='myhost2'>"
+ " <alias>node2</alias>"
+ " </host>"
+ " <host name='myhost3'>"
+ " <alias>node3</alias>"
+ " </host>"
+ " <host name='myhost4'>"
+ " <alias>node4</alias>"
+ " </host>"
+ " <host name='myhost5'>"
+ " <alias>node5</alias>"
+ " </host>"
+ "</hosts>";
VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services);
VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true)));
assertThat(model.getContainerClusters().get("mydisc").getContainers().size(), is(3));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getConfigId(), is("mydisc/container.0"));
assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(0).isInitialized());
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getConfigId(), is("mydisc/container.1"));
assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(1).isInitialized());
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getConfigId(), is("mydisc/container.2"));
assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(2).isInitialized());
assertThat(model.getContainerClusters().get("mydisc2").getContainers().size(), is(2));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getConfigId(), is("mydisc2/container.0"));
assertTrue(model.getContainerClusters().get("mydisc2").getContainers().get(0).isInitialized());
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getConfigId(), is("mydisc2/container.1"));
assertTrue(model.getContainerClusters().get("mydisc2").getContainers().get(1).isInitialized());
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getJvmArgs(), is(""));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getJvmArgs(), is(""));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getJvmArgs(), is(""));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(model.getContainerClusters().get("mydisc").getMemoryPercentage(), is(Optional.empty()));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getJvmArgs(), is("-verbosegc"));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getJvmArgs(), is("-verbosegc"));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getPreLoad(), is("lib/blablamalloc.so"));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getPreLoad(), is("lib/blablamalloc.so"));
assertThat(model.getContainerClusters().get("mydisc2").getMemoryPercentage(), is(Optional.of(45)));
HostSystem hostSystem = model.getHostSystem();
assertNotNull(hostSystem.getHostByHostname("myhost0"));
assertNotNull(hostSystem.getHostByHostname("myhost1"));
assertNotNull(hostSystem.getHostByHostname("myhost2"));
assertNotNull(hostSystem.getHostByHostname("myhost3"));
assertNull(hostSystem.getHostByHostname("Nope"));
}
@Test
public void testNodeCountForContentGroup() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"\n" +
" <admin version='3.0'>" +
" <nodes count='3'/>" +
" </admin>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
int numberOfHosts = 2;
tester.addHosts(numberOfHosts);
int numberOfContentNodes = 2;
VespaModel model = tester.createModel(xmlWithNodes, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
final Map<String, ContentCluster> contentClusters = model.getContentClusters();
ContentCluster cluster = contentClusters.get("bar");
assertThat(cluster.getRootGroup().getNodes().size(), is(numberOfContentNodes));
int i = 0;
for (StorageNode node : cluster.getRootGroup().getNodes())
assertEquals(i++, node.getDistributionKey());
}
@Test
public void testSeparateClusters() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes count='1'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(3);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 1, model.getContainerClusters().get("container1").getContainers().size());
assertEquals("Heap size for container", 60, physicalMemoryPercentage(model.getContainerClusters().get("container1")));
}
@Test
public void testClusterMembership() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes count='1'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(1, model.getHostSystem().getHosts().size());
HostResource host = model.getHostSystem().getHosts().iterator().next();
assertEquals(1, host.clusterMemberships().size());
ClusterMembership membership = host.clusterMemberships().iterator().next();
assertEquals("container", membership.cluster().type().name());
assertEquals("container1", membership.cluster().id().value());
}
@Test
public void testCombinedCluster() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes of='content1'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
assertEquals("Heap size is lowered with combined clusters",
17, physicalMemoryPercentage(model.getContainerClusters().get("container1")));
}
@Test
public void testCombinedClusterWithJvmArgs() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <document-processing/>" +
" <nodes of='content1' jvmargs='testarg'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
for (Container container : model.getContainerClusters().get("container1").getContainers())
assertTrue(container.getJvmArgs().contains("testarg"));
}
@Test
public void testMultipleCombinedClusters() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes of='content1'/>" +
" </container>" +
" <container version='1.0' id='container2'>" +
" <nodes of='content2'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
" <content version='1.0' id='content2'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='3'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(5);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
assertEquals("Nodes in content2", 3, model.getContentClusters().get("content2").getRootGroup().getNodes().size());
assertEquals("Nodes in container2", 3, model.getContainerClusters().get("container2").getContainers().size());
}
@Test
public void testNonExistingCombinedClusterReference() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes of='container2'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
try {
tester.createModel(xmlWithNodes, true);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("container cluster 'container1' references service 'container2' but this service is not defined", e.getMessage());
}
}
@Test
public void testInvalidCombinedClusterReference() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" +
" </container>" +
" <container version='1.0' id='container2'>" +
" <nodes count='2'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
try {
tester.createModel(xmlWithNodes, true);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("container cluster 'container1' references service 'container2', but that is not a content service", e.getMessage());
}
}
@Test
public void testUsingNodesAndGroupCountAttributes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='9'/>" +
" </content>" +
" <content version='1.0' id='baz'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='27'/>" +
" </content>" +
"</services>";
int numberOfHosts = 64;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals(1, model.getContainerClusters().size());
Set<com.yahoo.vespa.model.Host> containerHosts = model.getContainerClusters().get("foo").getContainers().stream().map(Container::getHost).collect(Collectors.toSet());
assertEquals(10, containerHosts.size());
Admin admin = model.getAdmin();
Set<com.yahoo.vespa.model.Host> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
assertTrue("Slobroks are assigned from container nodes", containerHosts.containsAll(slobrokHosts));
assertTrue("Logserver is assigned from container nodes", containerHosts.contains(admin.getLogserver().getHost()));
assertEquals("No in-cluster config servers in a hosted environment", 0, admin.getConfigservers().size());
assertEquals("No admin cluster controller when multitenant", null, admin.getClusterControllers());
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default28", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default31", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default54", clusterControllers.getContainers().get(2).getHostName());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(9, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertEquals("default54", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getDistributionKey(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getConfigId(), is("bar/storage/2"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/3"));
assertEquals("default51", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(4));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/4"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getDistributionKey(), is(5));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getConfigId(), is("bar/storage/5"));
assertEquals("default48", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(8).getIndex(), is("8"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getDistributionKey(), is(24));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getConfigId(), is("bar/storage/24"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getDistributionKey(), is(25));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getConfigId(), is("bar/storage/25"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getDistributionKey(), is(26));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getConfigId(), is("bar/storage/26"));
cluster = model.getContentClusters().get("baz");
clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("baz-controllers", clusterControllers.getName());
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default02", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default27", clusterControllers.getContainers().get(2).getHostName());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(27, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("baz/storage/0"));
assertEquals("default27", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("baz/storage/1"));
assertEquals("default26", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertEquals("default25", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(26).getIndex(), is("26"));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getDistributionKey(), is(26));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getConfigId(), is("baz/storage/26"));
}
@Test
public void testGroupsOfSize1() {
    // 8 content nodes spread over 8 groups: every group holds exactly one node.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>1</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='8' groups='8'/>" +
            " </content>" +
            "</services>";
    int numberOfHosts = 18;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, true);
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
    ContentCluster cluster = model.getContentClusters().get("bar");
    // 3 cluster controllers are created for the content cluster; exact hosts are deterministic.
    ContainerCluster clusterControllers = cluster.getClusterControllers();
    assertEquals(3, clusterControllers.getContainers().size());
    assertEquals("bar-controllers", clusterControllers.getName());
    assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
    assertEquals("default02", clusterControllers.getContainers().get(1).getHostName());
    assertEquals("default08", clusterControllers.getContainers().get(2).getHostName());
    // All content nodes live in subgroups; the root group itself holds none.
    assertEquals(0, cluster.getRootGroup().getNodes().size());
    assertEquals(8, cluster.getRootGroup().getSubgroups().size());
    assertEquals(8, cluster.distributionBits());
    // Group 0: single node with distribution key 0; hosts are handed out in descending order
    // (group 0 -> default08, group 1 -> default07, ..., group 7 -> default01).
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
    assertEquals("default08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/1"));
    assertEquals("default07", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
    // Last group (index 7) gets the lowest-numbered host.
    assertThat(cluster.getRootGroup().getSubgroups().get(7).getIndex(), is("7"));
    assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getDistributionKey(), is(7));
    assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getConfigId(), is("bar/storage/7"));
    assertEquals("default01", cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getHostName());
}
@Test
public void testExplicitNonDedicatedClusterControllers() {
    // <controllers dedicated='false' count='6'>: controllers are placed on shared nodes,
    // and the even requested count is adjusted to the closest odd number (5).
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <controllers><nodes dedicated='false' count='6'/></controllers>" +
            " <nodes count='9' groups='3'/>" +
            " </content>" +
            "</services>";
    int numberOfHosts = 19;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, true);
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
    ContentCluster cluster = model.getContentClusters().get("bar");
    ContainerCluster clusterControllers = cluster.getClusterControllers();
    assertEquals( 8, cluster.distributionBits());
    assertEquals("We get the closest odd number", 5, clusterControllers.getContainers().size());
    assertEquals("bar-controllers", clusterControllers.getName());
    // Exact controller host placement is deterministic for this host count.
    assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
    assertEquals("default02", clusterControllers.getContainers().get(1).getHostName());
    assertEquals("default04", clusterControllers.getContainers().get(2).getHostName());
    assertEquals("default05", clusterControllers.getContainers().get(3).getHostName());
    assertEquals("default07", clusterControllers.getContainers().get(4).getHostName());
    // Spot-check content node placement across the three groups.
    assertEquals("default09", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
    assertEquals("default08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getHostName());
    assertEquals("default06", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
    assertEquals("default03", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
}
@Test
public void testClusterControllersWithGroupSize2() {
    // 8 content nodes in 4 groups of 2; the controller count becomes the closest odd number (3).
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='8' groups='4'/>" +
            " </content>" +
            "</services>";
    int hostCount = 18;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContainerCluster controllers = vespaModel.getContentClusters().get("bar").getClusterControllers();
    assertEquals("We get the closest odd number", 3, controllers.getContainers().size());
    assertEquals("bar-controllers", controllers.getName());
    assertEquals("default01", controllers.getContainers().get(0).getHostName());
    assertEquals("default03", controllers.getContainers().get(1).getHostName());
    assertEquals("default08", controllers.getContainers().get(2).getHostName());
}
@Test
public void testClusterControllersCanSupplementWithAllContainerClusters() throws ParseException {
    // Non-dedicated controllers may be drawn from any container cluster's hosts.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo1'>" +
            " <nodes count='2'/>" +
            " </container>" +
            " <container version='1.0' id='foo2'>" +
            " <nodes count='1'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <controllers><nodes dedicated='false' count='5'/></controllers>" +
            " <nodes count='2'/>" +
            " </content>" +
            "</services>";
    int hostCount = 5;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContainerCluster controllers = vespaModel.getContentClusters().get("bar").getClusterControllers();
    assertEquals(1, controllers.getContainers().size());
}
@Test
public void testClusterControllersAreNotPlacedOnRetiredNodes() {
    // Hosts default09, default06 and default03 are retired; controller placement must skip them.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='9' groups='3'/>" +
            " </content>" +
            "</services>";
    int numberOfHosts = 19;
    VespaModelTester tester = new VespaModelTester();
    // The trailing varargs name the hosts to mark as retired.
    VespaModel model = tester.createModel(services, true, "default09", "default06", "default03");
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
    ContentCluster cluster = model.getContentClusters().get("bar");
    ContainerCluster clusterControllers = cluster.getClusterControllers();
    assertEquals(3, clusterControllers.getContainers().size());
    assertEquals("bar-controllers", clusterControllers.getName());
    assertEquals("Skipping retired default09", "default01", clusterControllers.getContainers().get(0).getHostName());
    assertEquals("Skipping retired default03", "default04", clusterControllers.getContainers().get(1).getHostName());
    assertEquals("Skipping retired default06", "default08", clusterControllers.getContainers().get(2).getHostName());
}
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() {
    // Retiring default09 keeps it in the slobrok set, in addition to the 3 regular members.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            "</services>";
    int hostCount = 10;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true, "default09");
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals("Includes retired node", 1+3, vespaModel.getAdmin().getSlobroks().size());
    assertEquals("default01", vespaModel.getAdmin().getSlobroks().get(0).getHostName());
    assertEquals("default02", vespaModel.getAdmin().getSlobroks().get(1).getHostName());
    assertEquals("default10", vespaModel.getAdmin().getSlobroks().get(2).getHostName());
    assertEquals("Included in addition because it is retired", "default09", vespaModel.getAdmin().getSlobroks().get(3).getHostName());
}
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() throws ParseException {
    // Same as the test above but with the two highest-numbered hosts retired:
    // both are appended after the 3 regular slobrok members.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            "</services>";
    int hostCount = 10;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true, "default09", "default08");
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals("Includes retired node", 3+2, vespaModel.getAdmin().getSlobroks().size());
    assertEquals("default01", vespaModel.getAdmin().getSlobroks().get(0).getHostName());
    assertEquals("default02", vespaModel.getAdmin().getSlobroks().get(1).getHostName());
    assertEquals("default10", vespaModel.getAdmin().getSlobroks().get(2).getHostName());
    assertEquals("Included in addition because it is retired", "default08", vespaModel.getAdmin().getSlobroks().get(3).getHostName());
    assertEquals("Included in addition because it is retired", "default09", vespaModel.getAdmin().getSlobroks().get(4).getHostName());
}
@Test
public void testSlobroksAreSpreadOverAllContainerClusters() {
    // With two container clusters, slobrok members come from both; retired hosts
    // (default12, default03, default02) remain in the set in addition to the regulars.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            " <container version='1.0' id='bar'>" +
            " <nodes count='3'/>" +
            " </container>" +
            "</services>";
    int numberOfHosts = 13;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, true, "default12", "default03", "default02");
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
    // 3 regular slobroks plus the 3 retired hosts.
    assertEquals("Includes retired node", 3+3, model.getAdmin().getSlobroks().size());
    assertEquals("default04", model.getAdmin().getSlobroks().get(0).getHostName());
    assertEquals("default13", model.getAdmin().getSlobroks().get(1).getHostName());
    assertEquals("Included in addition because it is retired", "default12", model.getAdmin().getSlobroks().get(2).getHostName());
    assertEquals("default01", model.getAdmin().getSlobroks().get(3).getHostName());
    assertEquals("Included in addition because it is retired", "default02", model.getAdmin().getSlobroks().get(4).getHostName());
    assertEquals("Included in addition because it is retired", "default03", model.getAdmin().getSlobroks().get(5).getHostName());
}
@Test
public void testSlobroksAreSpreadOverAllContainerClustersExceptNodeAdmin() {
    // In the hosted-vespa routing application, slobroks must all land in the routing
    // cluster and never in the node-admin cluster.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'/>" +
            " <container version='1.0' id='routing'>" +
            " <nodes count='10'/>" +
            " </container>" +
            " <container version='1.0' id='node-admin'>" +
            " <nodes count='3'/>" +
            " </container>" +
            "</services>";
    int hostCount = 13;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    modelTester.setApplicationId("hosted-vespa", "routing", "default");
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    Set<String> routingHostnames = getClusterHostnames(vespaModel, "routing");
    assertEquals(10, routingHostnames.size());
    Set<String> nodeAdminHostnames = getClusterHostnames(vespaModel, "node-admin");
    assertEquals(3, nodeAdminHostnames.size());
    Set<String> slobrokHostnames = vespaModel.getAdmin().getSlobroks().stream()
                                             .map(AbstractService::getHostName)
                                             .collect(Collectors.toSet());
    assertEquals(3, slobrokHostnames.size());
    assertThat(slobrokHostnames, everyItem(isIn(routingHostnames)));
    assertThat(slobrokHostnames, everyItem(not(isIn(nodeAdminHostnames))));
}
/** Returns the hostnames of all hosts running at least one service tagged with the given cluster name. */
private Set<String> getClusterHostnames(VespaModel model, String clusterId) {
    Optional<String> wantedCluster = Optional.of(clusterId);
    return model.getHosts().stream()
                .filter(host -> host.getServices().stream()
                                    .anyMatch(service -> Objects.equals(service.getProperty("clustername"),
                                                                        wantedCluster)))
                .map(HostInfo::getHostname)
                .collect(Collectors.toSet());
}
@Test
public void test2ContentNodesProduces1ClusterController() {
    // A lone 2-node content cluster ends up with a single cluster controller.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'/>" +
            " </content>" +
            "</services>";
    int hostCount = 2;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContainerCluster controllers = vespaModel.getContentClusters().get("bar").getClusterControllers();
    assertEquals(1, controllers.getContainers().size());
}
@Test
public void test2ContentNodesWithContainerClusterProducesMixedClusterControllerCluster() throws ParseException {
    // Even when a container cluster is present, the 2-node content cluster
    // still gets a single cluster controller.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='3'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2'/>" +
            " </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(5);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    ContainerCluster controllers = vespaModel.getContentClusters().get("bar").getClusterControllers();
    assertEquals(1, controllers.getContainers().size());
}
@Ignore
@Test
public void test2ContentNodesOn2ClustersWithContainerClusterProducesMixedClusterControllerCluster() throws ParseException {
    // Two 2-node content clusters plus a 3-node container cluster: each content cluster's
    // controller set is supplemented with a distinct container node to reach 3 members.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <container version='1.0' id='container'>" +
            " <nodes count='3' flavor='container-node'/>" +
            " </container>" +
            " <content version='1.0' id='content1'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2' flavor='content1-node'/>" +
            " </content>" +
            " <content version='1.0' id='content2'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='2' flavor='content2-node'/>" +
            " </content>" +
            "</services>";
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts("container-node", 3);
    tester.addHosts("content1-node", 2);
    tester.addHosts("content2-node", 2);
    VespaModel model = tester.createModel(services, true);
    ContentCluster cluster1 = model.getContentClusters().get("content1");
    ContainerCluster clusterControllers1 = cluster1.getClusterControllers();
    // Fixed: this test indexes controller members 0..2 below, and the symmetric
    // assertions for content2 expect 3 members, so the expected size is 3 (was 1).
    assertEquals(3, clusterControllers1.getContainers().size());
    assertEquals("content1-node0", clusterControllers1.getContainers().get(0).getHostName());
    assertEquals("content1-node1", clusterControllers1.getContainers().get(1).getHostName());
    assertEquals("container-node0", clusterControllers1.getContainers().get(2).getHostName());
    ContentCluster cluster2 = model.getContentClusters().get("content2");
    ContainerCluster clusterControllers2 = cluster2.getClusterControllers();
    assertEquals(3, clusterControllers2.getContainers().size());
    assertEquals("content2-node0", clusterControllers2.getContainers().get(0).getHostName());
    assertEquals("content2-node1", clusterControllers2.getContainers().get(1).getHostName());
    assertEquals("We do not pick the container used to supplement another cluster",
                 "container-node1", clusterControllers2.getContainers().get(2).getHostName());
}
@Test
public void testExplicitDedicatedClusterControllers() {
    // <controllers dedicated='true' count='4'>: the requested count is honored exactly
    // (no odd-number adjustment), on hosts of their own.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <container version='1.0' id='foo'>" +
            " <nodes count='10'/>" +
            " </container>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>2</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <controllers><nodes dedicated='true' count='4'/></controllers>" +
            " <nodes count='9' groups='3'/>" +
            " </content>" +
            "</services>";
    int numberOfHosts = 23;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, true);
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
    ContentCluster cluster = model.getContentClusters().get("bar");
    ContainerCluster clusterControllers = cluster.getClusterControllers();
    assertEquals(4, clusterControllers.getContainers().size());
    assertEquals("bar-controllers", clusterControllers.getName());
    // Dedicated controller hosts are handed out in descending order.
    assertEquals("default04", clusterControllers.getContainers().get(0).getHostName());
    assertEquals("default03", clusterControllers.getContainers().get(1).getHostName());
    assertEquals("default02", clusterControllers.getContainers().get(2).getHostName());
    assertEquals("default01", clusterControllers.getContainers().get(3).getHostName());
}
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() {
    // 24 nodes in 3 groups are requested but only 6 hosts exist (createModel with
    // failOnOutOfCapacity=false): the cluster is scaled down to 3 groups of 2 nodes,
    // and redundancy settings are scaled down accordingly.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <admin version='3.0'>" +
            " <nodes count='3'/>" +
            " </admin>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy reply-after='3'>4</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='24' groups='3'/>" +
            " <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
            " </content>" +
            "</services>";
    int numberOfHosts = 6;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, false);
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
    ContentCluster cluster = model.getContentClusters().get("bar");
    // Effective redundancy = 2 copies per group * 3 groups.
    assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy());
    assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy());
    assertEquals(2*3, cluster.redundancy().effectiveReadyCopies());
    assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get());
    assertEquals(0, cluster.getRootGroup().getNodes().size());
    assertEquals(3, cluster.getRootGroup().getSubgroups().size());
    // Each of the 3 groups gets 2 nodes with consecutive distribution keys.
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(2));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(2));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(2));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/2"));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(3));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/3"));
    assertThat(cluster.getRootGroup().getSubgroups().get(2).getIndex(), is("2"));
    assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().size(), is(2));
    assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getDistributionKey(), is(4));
    assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getConfigId(), is("bar/storage/4"));
    assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getDistributionKey(), is(5));
    assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getConfigId(), is("bar/storage/5"));
}
@Test
public void testUsingNodesCountAttributesAndGettingTooFewNodes() {
    // 24 flat nodes are requested but only 4 hosts exist: node count, redundancy,
    // searchable copies and dispatch groups are all scaled down to 4.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            " <admin version='3.0'>" +
            " <nodes count='3'/>" +
            " </admin>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy reply-after='8'>12</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='24'/>" +
            " <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
            " <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
            " </content>" +
            "</services>";
    int numberOfHosts = 4;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, false);
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
    ContentCluster cluster = model.getContentClusters().get("bar");
    assertEquals(4, cluster.redundancy().effectiveInitialRedundancy());
    assertEquals(4, cluster.redundancy().effectiveFinalRedundancy());
    assertEquals(4, cluster.redundancy().effectiveReadyCopies());
    assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
    // Flat topology: no partitions, all nodes directly in the root group.
    assertFalse(cluster.getRootGroup().getPartitions().isPresent());
    assertEquals(4, cluster.getRootGroup().getNodes().size());
    assertEquals(0, cluster.getRootGroup().getSubgroups().size());
    assertThat(cluster.getRootGroup().getNodes().size(), is(4));
    assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0));
    assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0"));
    assertThat(cluster.getRootGroup().getNodes().get(1).getDistributionKey(), is(1));
    assertThat(cluster.getRootGroup().getNodes().get(1).getConfigId(), is("bar/storage/1"));
    assertThat(cluster.getRootGroup().getNodes().get(2).getDistributionKey(), is(2));
    assertThat(cluster.getRootGroup().getNodes().get(2).getConfigId(), is("bar/storage/2"));
    assertThat(cluster.getRootGroup().getNodes().get(3).getDistributionKey(), is(3));
    assertThat(cluster.getRootGroup().getNodes().get(3).getConfigId(), is("bar/storage/3"));
}
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() throws ParseException {
    // 24 nodes in 3 groups are requested but only 1 host exists: the cluster collapses
    // to a single flat node, with redundancy 1 and a single cluster controller.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='3.0'>" +
            " <nodes count='3'/>" +
            " </admin>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy reply-after='3'>4</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='24' groups='3'/>" +
            " <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
            " </content>" +
            "</services>";
    int numberOfHosts = 1;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, false);
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
    ContentCluster cluster = model.getContentClusters().get("bar");
    ContainerCluster clusterControllers = cluster.getClusterControllers();
    assertEquals(1, clusterControllers.getContainers().size());
    assertEquals("bar-controllers", clusterControllers.getName());
    assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
    assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
    assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
    assertEquals(1, cluster.redundancy().effectiveReadyCopies());
    // Grouping is dropped entirely when only one node remains.
    assertFalse(cluster.getRootGroup().getPartitions().isPresent());
    assertEquals(1, cluster.getRootGroup().getNodes().size());
    assertEquals(0, cluster.getRootGroup().getSubgroups().size());
    assertThat(cluster.getRootGroup().getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0));
    assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0"));
}
@Test(expected = IllegalArgumentException.class)
public void testRequiringMoreNodesThanAreAvailable() throws ParseException {
    // With required='true', under-allocation must fail instead of silently scaling down.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>1</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='3' required='true'/>" +
            " </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(2);
    modelTester.createModel(servicesXml, false);
}
@Test
public void testUsingNodesCountAttributesAndGettingJustOneNode() {
    // 24 flat nodes are requested but only 1 host exists: everything
    // (redundancy, ready copies, dispatch groups) scales down to 1.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='3.0'>" +
            " <nodes count='3'/>" +
            " </admin>" +
            " <content version='1.0' id='bar'>" +
            " <redundancy reply-after='8'>12</redundancy>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <nodes count='24'/>" +
            " <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
            " <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
            " </content>" +
            "</services>";
    int numberOfHosts = 1;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, false);
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
    ContentCluster cluster = model.getContentClusters().get("bar");
    assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
    assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
    assertEquals(1, cluster.redundancy().effectiveReadyCopies());
    assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
    // Single flat node in the root group, no partitions or subgroups.
    assertFalse(cluster.getRootGroup().getPartitions().isPresent());
    assertEquals(1, cluster.getRootGroup().getNodes().size());
    assertEquals(0, cluster.getRootGroup().getSubgroups().size());
    assertThat(cluster.getRootGroup().getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0));
    assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0"));
}
@Test
public void testRequestingSpecificFlavors() {
    // Every service type requests its own flavor; the tester provides exactly the
    // matching number of hosts per flavor, and all of them must be used.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <admin version='4.0'>" +
            " <logservers><nodes count='1' dedicated='true' flavor='logserver-flavor'/></logservers>" +
            " <slobroks><nodes count='2' dedicated='true' flavor='slobrok-flavor'/></slobroks>" +
            " </admin>" +
            " <container version='1.0' id='container'>" +
            " <nodes count='4' flavor='container-flavor'/>" +
            " </container>" +
            " <content version='1.0' id='foo'>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <controllers><nodes count='2' dedicated='true' flavor='controller-foo-flavor'/></controllers>" +
            " <nodes count='5' flavor='content-foo-flavor'/>" +
            " </content>" +
            " <content version='1.0' id='bar'>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " <controllers><nodes count='3' dedicated='true' flavor='controller-bar-flavor'/></controllers>" +
            " <nodes count='6' flavor='content-bar-flavor'/>" +
            " </content>" +
            "</services>";
    // 1 + 2 + 4 + 2 + 5 + 3 + 6 = 23 hosts in total.
    int totalHosts = 23;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts("logserver-flavor", 1);
    tester.addHosts("slobrok-flavor", 2);
    tester.addHosts("container-flavor", 4);
    tester.addHosts("controller-foo-flavor", 2);
    tester.addHosts("content-foo-flavor", 5);
    tester.addHosts("controller-bar-flavor", 3);
    tester.addHosts("content-bar-flavor", 6);
    VespaModel model = tester.createModel(services, true, 0);
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(totalHosts));
}
@Test
public void testJDiscOnly() {
    // A services file consisting of a single jdisc cluster still gets a log server and slobroks.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<jdisc version='1.0'>" +
            " <search/>" +
            " <nodes count='3'/>" +
            "</jdisc>";
    int hostCount = 3;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(3, vespaModel.getContainerClusters().get("jdisc").getContainers().size());
    assertNotNull(vespaModel.getAdmin().getLogserver());
    assertEquals(3, vespaModel.getAdmin().getSlobroks().size());
}
@Test
public void testUsingHostaliasWithProvisioner() {
    // Legacy hostalias syntax still works when a provisioner is in use.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "<admin version='2.0'>" +
            " <adminserver hostalias='node1'/>\n"+
            "</admin>\n" +
            "<jdisc id='mydisc' version='1.0'>" +
            " <handler id='myHandler'>" +
            " <component id='injected' />" +
            " </handler>" +
            " <nodes>" +
            " <node hostalias='node1'/>" +
            " </nodes>" +
            "</jdisc>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
}
@Test
public void testThatStandaloneSyntaxWorksOnHostedVespa() {
    // A standalone-container services file (top-level <jdisc>) is accepted in hosted mode.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<jdisc id='foo' version='1.0'>" +
            " <http>" +
            " <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" +
            " </http>" +
            "</jdisc>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getHosts().size());
    assertEquals(1, vespaModel.getContainerClusters().size());
}
@Test
public void testNoNodeTagMeans1Node() {
    // Clusters declared without a <nodes> tag default to a single node each.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <jdisc id='foo' version='1.0'>" +
            " <search/>" +
            " <document-api/>" +
            " </jdisc>" +
            " <content version='1.0' id='bar'>" +
            " <documents>" +
            " <document type='type1' mode='index'/>" +
            " </documents>" +
            " </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
    assertEquals(1, vespaModel.getContainerClusters().get("foo").getContainers().size());
    assertEquals(1, vespaModel.getContentClusters().get("bar").getRootGroup().countNodes());
}
@Test
public void testNoNodeTagMeans1NodeNoContent() {
    // Same default-to-one-node behavior with only a container cluster declared.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            " <jdisc id='foo' version='1.0'>" +
            " <search/>" +
            " <document-api/>" +
            " </jdisc>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
    assertEquals(1, vespaModel.getContainerClusters().get("foo").getContainers().size());
}
/** When not hosted, a missing nodes tag should still produce a single node per cluster. */
@Test
public void testNoNodeTagMeans1NodeNonHosted() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <jdisc id='foo' version='1.0'>" +
            "    <search/>" +
            "    <document-api/>" +
            "  </jdisc>" +
            "  <content version='1.0' id='bar'>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.setHosted(false);
    modelTester.addHosts(1);
    VespaModel model = modelTester.createModel(servicesXml, true);
    assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, model.getAdmin().getSlobroks().size());
    assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
    assertEquals(1, model.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size());
}
/** Non-hosted setup with explicit single-node nodes tags in both clusters. */
@Test
public void testSingleNodeNonHosted() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <jdisc id='foo' version='1.0'>" +
            "    <search/>" +
            "    <document-api/>" +
            "    <nodes><node hostalias='foo'/></nodes>" +
            "  </jdisc>" +
            "  <content version='1.0' id='bar'>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes><node hostalias='foo' distribution-key='0'/></nodes>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.setHosted(false);
    modelTester.addHosts(1);
    VespaModel model = modelTester.createModel(servicesXml, true);
    assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, model.getAdmin().getSlobroks().size());
    assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
    assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes());
}
/**
 * Recreate the combination used in some factory tests:
 * a multitenant (but non-hosted, non-provisioned) model where admin, container and
 * content services all share a single host alias.
 */
@Test
public void testMultitenantButNotHosted() {
    String services =
            "<?xml version='1.0' encoding='UTF-8' ?>" +
            "<services version='1.0'>" +
            "  <admin version='2.0'>" +
            "    <adminserver hostalias='node1'/>" +
            "  </admin>" +
            "  <jdisc id='default' version='1.0'>" +
            "    <search/>" +
            "    <nodes>" +
            "      <node hostalias='node1'/>" +
            "    </nodes>" +
            "  </jdisc>" +
            "  <content id='storage' version='1.0'>" +
            "    <redundancy>2</redundancy>" +
            "    <group>" +
            "      <node distribution-key='0' hostalias='node1'/>" +
            "      <node distribution-key='1' hostalias='node1'/>" +
            "    </group>" +
            "    <tuning>" +
            "      <cluster-controller>" +
            "        <transition-time>0</transition-time>" +
            "      </cluster-controller>" +
            "    </tuning>" +
            "    <documents>" +
            "      <document mode='store-only' type='type1'/>" +
            "    </documents>" +
            "    <engine>" +
            "      <proton/>" +
            "    </engine>" +
            "  </content>" +
            "  </services>";
    VespaModel model = createNonProvisionedMultitenantModel(services);
    // Everything is mapped onto the single declared host.
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(1));
    ContentCluster content = model.getContentClusters().get("storage");
    assertEquals(2, content.getRootGroup().getNodes().size());
    // The content cluster gets an implicit cluster-controller container cluster.
    ContainerCluster controller = content.getClusterControllers();
    assertEquals(1, controller.getContainers().size());
}
/**
 * Same combination as {@link #testMultitenantButNotHosted()}, but with an additional
 * 'search' content cluster sharing a node with the 'storage' cluster.
 *
 * Fix: the method was annotated with {@code @Test} twice; {@code @Test} is not a
 * repeatable annotation, so the duplicate is a compile error and has been removed.
 */
@Test
public void testMultitenantButNotHostedSharedContentNode() {
    String services =
            "<?xml version='1.0' encoding='UTF-8' ?>" +
            "<services version='1.0'>" +
            "  <admin version='2.0'>" +
            "    <adminserver hostalias='node1'/>" +
            "  </admin>" +
            "  <jdisc id='default' version='1.0'>" +
            "    <search/>" +
            "    <nodes>" +
            "      <node hostalias='node1'/>" +
            "    </nodes>" +
            "  </jdisc>" +
            "  <content id='storage' version='1.0'>" +
            "    <redundancy>2</redundancy>" +
            "    <group>" +
            "      <node distribution-key='0' hostalias='node1'/>" +
            "      <node distribution-key='1' hostalias='node1'/>" +
            "    </group>" +
            "    <tuning>" +
            "      <cluster-controller>" +
            "        <transition-time>0</transition-time>" +
            "      </cluster-controller>" +
            "    </tuning>" +
            "    <documents>" +
            "      <document mode='store-only' type='type1'/>" +
            "    </documents>" +
            "    <engine>" +
            "      <proton/>" +
            "    </engine>" +
            "  </content>" +
            "  <content id='search' version='1.0'>" +
            "    <redundancy>2</redundancy>" +
            "    <group>" +
            "      <node distribution-key='0' hostalias='node1'/>" +
            "    </group>" +
            "    <documents>" +
            "      <document type='type1'/>" +
            "    </documents>" +
            "  </content>" +
            "  </services>";
    VespaModel model = createNonProvisionedMultitenantModel(services);
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(1));
    ContentCluster content = model.getContentClusters().get("storage");
    assertEquals(2, content.getRootGroup().getNodes().size());
    ContainerCluster controller = content.getClusterControllers();
    assertEquals(1, controller.getContainers().size());
}
/** Convenience overload: a non-provisioned model with multitenant mode on and no hosts file. */
private VespaModel createNonProvisionedMultitenantModel(String services) {
    boolean multitenant = true;
    return createNonProvisionedModel(multitenant, null, services);
}
/**
 * Builds a model without going through host provisioning.
 *
 * @param multitenant whether the deploy properties should declare a multitenant system
 * @param hosts hosts.xml content, or null for none
 * @param services services.xml content
 */
private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) {
    VespaModelCreatorWithMockPkg creator =
            new VespaModelCreatorWithMockPkg(hosts, services, ApplicationPackageUtils.generateSearchDefinition("type1"));
    DeployState deployState = new DeployState.Builder()
            .applicationPackage(creator.appPkg)
            .properties(new DeployProperties.Builder().multitenant(multitenant).build())
            .build(true);
    return creator.create(false, deployState);
}
/**
 * TLD config ids must be deterministic: building the same services with and without one
 * retired/failed host must yield matching tld/container ids on shared hosts.
 */
@Test
public void testThatTldConfigIdsAreDeterministic() {
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <jdisc version='1.0' id='jdisc0'>" +
            "    <search/>" +
            "    <nodes count='2'/>" +
            "  </jdisc>" +
            "  <jdisc version='1.0' id='jdisc1'>" +
            "    <search/>" +
            "    <nodes count='2'/>" +
            "  </jdisc>" +
            "  <content version='1.0' id='content0'>" +
            "    <redundancy>2</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='2'/>" +
            "  </content>" +
            "  <content version='1.0' id='content1'>" +
            "    <redundancy>2</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    int numberOfHosts = 8;
    {
        // Baseline: exactly as many hosts as needed, container ids start at 0.
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, true);
        assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
        Map<String, ContentCluster> contentClusters = model.getContentClusters();
        assertEquals(2, contentClusters.size());
        checkThatTldAndContainerRunningOnSameHostHaveSameId(
                model.getContainerClusters().values(),
                model.getContentClusters().values(),
                0);
    }
    {
        // One extra host with "default0" unavailable: ids shift by one but stay consistent.
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts + 1);
        VespaModel model = tester.createModel(services, true, 1, "default0");
        assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
        Map<String, ContentCluster> contentClusters = model.getContentClusters();
        assertEquals(2, contentClusters.size());
        checkThatTldAndContainerRunningOnSameHostHaveSameId(
                model.getContainerClusters().values(),
                model.getContentClusters().values(),
                1);
    }
}
/**
 * Asserts that each TLD of each content cluster runs on the same host as a container,
 * and that their config ids use the same index.
 *
 * @param containerClusters all container clusters of the model
 * @param contentClusters all content clusters of the model
 * @param startIndexForContainerIds offset added when computing expected container/tld indices
 */
private void checkThatTldAndContainerRunningOnSameHostHaveSameId(Collection<ContainerCluster> containerClusters,
                                                                 Collection<ContentCluster> contentClusters,
                                                                 int startIndexForContainerIds) {
    for (ContentCluster contentCluster : contentClusters) {
        String contentClusterName = contentCluster.getName();
        int i = 0;
        for (ContainerCluster containerCluster : containerClusters) {
            String containerClusterName = containerCluster.getName();
            // NOTE(review): assumes each container cluster has exactly 2 containers, matching
            // the nodes count='2' declarations in the caller — confirm if reused elsewhere.
            for (int j = 0; j < 2; j++) {
                // TLDs are laid out per container cluster: 2 per cluster, hence index 2*i + j.
                Dispatch tld = contentCluster.getSearch().getIndexed().getTLDs().get(2 * i + j);
                Container container = containerCluster.getContainers().get(j);
                int containerConfigIdIndex = j + startIndexForContainerIds;
                assertEquals(container.getHostName(), tld.getHostname());
                assertEquals(contentClusterName + "/search/cluster." + contentClusterName + "/tlds/" +
                             containerClusterName + "." + containerConfigIdIndex + ".tld." + containerConfigIdIndex,
                             tld.getConfigId());
                assertEquals(containerClusterName + "/" + "container." + containerConfigIdIndex,
                             container.getConfigId());
            }
            i++;
        }
    }
}
/** Returns the JVM heap size, as a percentage of physical memory, configured for the given cluster. */
private int physicalMemoryPercentage(ContainerCluster cluster) {
    QrStartConfig.Builder builder = new QrStartConfig.Builder();
    cluster.getSearch().getConfig(builder);
    QrStartConfig config = new QrStartConfig(builder);
    return config.jvm().heapSizeAsPercentageOfPhysicalMemory();
}
/** Proton disk write speed should be tuned down for flavors without fast disk. */
@Test
public void require_that_proton_config_is_tuned_based_on_node_flavor() {
    String servicesXml = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
            "<services>",
            "  <content version='1.0' id='test'>",
            "    <documents>",
            "      <document type='type1' mode='index'/>",
            "    </documents>",
            "    <nodes count='2' flavor='content-test-flavor'/>",
            "  </content>",
            "</services>");
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(createFlavorFromDiskSetting("content-test-flavor", false), 2);
    VespaModel model = modelTester.createModel(servicesXml, true, 0);
    ContentSearchCluster searchCluster = model.getContentClusters().get("test").getSearch();
    assertEquals(2, searchCluster.getSearchNodes().size());
    // Slow-disk flavors get a reduced write speed on both nodes.
    assertEquals(40, getProtonConfig(searchCluster, 0).hwinfo().disk().writespeed(), 0.001);
    assertEquals(40, getProtonConfig(searchCluster, 1).hwinfo().disk().writespeed(), 0.001);
}
/** Creates a node flavor with the given name and disk-speed setting. */
private static Flavor createFlavorFromDiskSetting(String name, boolean fastDisk) {
    FlavorsConfig.Flavor.Builder builder = new FlavorsConfig.Flavor.Builder();
    builder.name(name).fastDisk(fastDisk);
    return new Flavor(new FlavorsConfig.Flavor(builder));
}
/** Returns the proton config produced for the search node at the given index in the cluster. */
private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) {
    List<SearchNode> nodes = cluster.getSearchNodes();
    assertTrue(searchNodeIdx < nodes.size());
    ProtonConfig.Builder builder = new ProtonConfig.Builder();
    nodes.get(searchNodeIdx).getConfig(builder);
    return new ProtonConfig(builder);
}
/**
 * Precedence check: an explicit config override and explicit proton tuning must both win
 * over the defaults derived from the node flavor; untouched values keep the flavor default.
 */
@Test
public void require_that_config_override_and_explicit_proton_tuning_have_precedence_over_default_node_flavor_tuning() {
    String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
            "<services>",
            "  <content version='1.0' id='test'>",
            "    <config name='vespa.config.search.core.proton'>",
            "      <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>",
            "    </config>",
            "    <documents>",
            "      <document type='type1' mode='index'/>",
            "    </documents>",
            "    <nodes count='1' flavor='content-test-flavor'/>",
            "    <engine>",
            "      <proton>",
            "        <tuning>",
            "          <searchnode>",
            "            <flushstrategy>",
            "              <native>",
            "                <total>",
            "                  <maxmemorygain>1000</maxmemorygain>",
            "                </total>",
            "              </native>",
            "            </flushstrategy>",
            "          </searchnode>",
            "        </tuning>",
            "      </proton>",
            "    </engine>",
            "  </content>",
            "</services>");
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts("default", 1);
    tester.addHosts(createFlavorFromMemoryAndDisk("content-test-flavor", 128, 100), 1);
    VespaModel model = tester.createModel(services, true, 0);
    ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
    ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId());
    // From the <config> override:
    assertEquals(2000, cfg.flush().memory().maxtlssize());
    // From the explicit proton tuning:
    assertEquals(1000, cfg.flush().memory().maxmemory());
    // Flavor-derived default (128 GB node) left in place for untouched settings.
    assertEquals((long) 16 * GB, cfg.flush().memory().each().maxmemory());
}
/** One gigabyte in bytes. Declared final (it is a constant) and computed in long arithmetic. */
private static final long GB = 1024L * 1024 * 1024;
/** Creates a node flavor with the given name, minimum memory (GB) and minimum disk (GB). */
private static Flavor createFlavorFromMemoryAndDisk(String name, int memoryGb, int diskGb) {
    FlavorsConfig.Flavor.Builder builder = new FlavorsConfig.Flavor.Builder();
    builder.name(name).minMainMemoryAvailableGb(memoryGb).minDiskAvailableGb(diskGb);
    return new Flavor(new FlavorsConfig.Flavor(builder));
}
/** Resolves the proton config the model produces for the given config id. */
private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
    ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder();
    model.getConfig(protonBuilder, configId);
    return new ProtonConfig(protonBuilder);
}
} | class ModelProvisioningTest {
/**
 * Two jdisc clusters with explicit node counts, provisioned from a hosts.xml via
 * InMemoryProvisioner: checks container counts, config ids, jvmargs/preload/memory
 * propagation, and host lookup.
 */
@Test
public void testNodeCountForJdisc() {
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>\n" +
            "\n" +
            "<admin version='3.0'><nodes count='1' /></admin>\n" +
            "<jdisc id='mydisc' version='1.0'>" +
            "  <handler id='myHandler'>" +
            "    <component id='injected' />" +
            "  </handler>" +
            "  <nodes count=\"3\"/>" +
            "</jdisc>" +
            "<jdisc id='mydisc2' version='1.0'>" +
            "  <document-processing/>" +
            "  <handler id='myHandler'>" +
            "    <component id='injected' />" +
            "  </handler>" +
            "  <nodes count='2' allocated-memory='45%' jvmargs='-verbosegc' preload='lib/blablamalloc.so'/>" +
            "</jdisc>" +
            "</services>";
    String hosts = "<hosts>"
            + "  <host name='myhost0'>"
            + "    <alias>node0</alias>"
            + "  </host>"
            + "  <host name='myhost1'>"
            + "    <alias>node1</alias>"
            + "  </host>"
            + "  <host name='myhost2'>"
            + "    <alias>node2</alias>"
            + "  </host>"
            + "  <host name='myhost3'>"
            + "    <alias>node3</alias>"
            + "  </host>"
            + "  <host name='myhost4'>"
            + "    <alias>node4</alias>"
            + "  </host>"
            + "  <host name='myhost5'>"
            + "    <alias>node5</alias>"
            + "  </host>"
            + "</hosts>";
    VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services);
    VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true)));
    // mydisc: 3 containers with sequential config ids, all initialized.
    assertThat(model.getContainerClusters().get("mydisc").getContainers().size(), is(3));
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getConfigId(), is("mydisc/container.0"));
    assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(0).isInitialized());
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getConfigId(), is("mydisc/container.1"));
    assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(1).isInitialized());
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getConfigId(), is("mydisc/container.2"));
    assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(2).isInitialized());
    // mydisc2: 2 containers.
    assertThat(model.getContainerClusters().get("mydisc2").getContainers().size(), is(2));
    assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getConfigId(), is("mydisc2/container.0"));
    assertTrue(model.getContainerClusters().get("mydisc2").getContainers().get(0).isInitialized());
    assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getConfigId(), is("mydisc2/container.1"));
    assertTrue(model.getContainerClusters().get("mydisc2").getContainers().get(1).isInitialized());
    // mydisc uses defaults: no jvmargs, default preload, no explicit memory percentage.
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getJvmArgs(), is(""));
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getJvmArgs(), is(""));
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getJvmArgs(), is(""));
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
    assertThat(model.getContainerClusters().get("mydisc").getMemoryPercentage(), is(Optional.empty()));
    // mydisc2 overrides jvmargs, preload and allocated-memory on the nodes tag.
    assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getJvmArgs(), is("-verbosegc"));
    assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getJvmArgs(), is("-verbosegc"));
    assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getPreLoad(), is("lib/blablamalloc.so"));
    assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getPreLoad(), is("lib/blablamalloc.so"));
    assertThat(model.getContainerClusters().get("mydisc2").getMemoryPercentage(), is(Optional.of(45)));
    // Host lookup by name works for allocated hosts only.
    HostSystem hostSystem = model.getHostSystem();
    assertNotNull(hostSystem.getHostByHostname("myhost0"));
    assertNotNull(hostSystem.getHostByHostname("myhost1"));
    assertNotNull(hostSystem.getHostByHostname("myhost2"));
    assertNotNull(hostSystem.getHostByHostname("myhost3"));
    assertNull(hostSystem.getHostByHostname("Nope"));
}
/** A content cluster with count='2' gets two nodes with sequential distribution keys. */
@Test
public void testNodeCountForContentGroup() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "\n" +
            "  <admin version='3.0'>" +
            "    <nodes count='3'/>" +
            "  </admin>" +
            "  <content version='1.0' id='bar'>" +
            "    <redundancy>2</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    int hostCount = 2;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel model = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, model.getRoot().getHostSystem().getHosts().size());
    ContentCluster barCluster = model.getContentClusters().get("bar");
    assertEquals(2, barCluster.getRootGroup().getNodes().size());
    int expectedKey = 0;
    for (StorageNode node : barCluster.getRootGroup().getNodes())
        assertEquals(expectedKey++, node.getDistributionKey());
}
/** Container and content clusters with their own nodes tags get separate node sets. */
@Test
public void testSeparateClusters() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "    <search/>" +
            "    <nodes count='1'/>" +
            "  </container>" +
            "  <content version='1.0' id='content1'>" +
            "    <redundancy>2</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(3);
    VespaModel model = modelTester.createModel(servicesXml, true);
    ContainerCluster container1 = model.getContainerClusters().get("container1");
    assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
    assertEquals("Nodes in container1", 1, container1.getContainers().size());
    // A dedicated container node keeps the full default heap percentage.
    assertEquals("Heap size for container", 60, physicalMemoryPercentage(container1));
}
/** A host allocated to a container cluster must expose exactly one matching cluster membership. */
@Test
public void testClusterMembership() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "    <nodes count='1'/>" +
            "  </container>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel model = modelTester.createModel(servicesXml, true);
    assertEquals(1, model.getHostSystem().getHosts().size());
    HostResource theHost = model.getHostSystem().getHosts().iterator().next();
    assertEquals(1, theHost.clusterMemberships().size());
    ClusterMembership membership = theHost.clusterMemberships().iterator().next();
    assertEquals("container", membership.cluster().type().name());
    assertEquals("container1", membership.cluster().id().value());
}
/** A container cluster declared with nodes of='content1' runs on the content cluster's nodes. */
@Test
public void testCombinedCluster() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "    <search/>" +
            "    <nodes of='content1'/>" +
            "  </container>" +
            "  <content version='1.0' id='content1'>" +
            "    <redundancy>2</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(2);
    VespaModel model = modelTester.createModel(servicesXml, true);
    ContainerCluster container1 = model.getContainerClusters().get("container1");
    assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
    assertEquals("Nodes in container1", 2, container1.getContainers().size());
    assertEquals("Heap size is lowered with combined clusters",
                 17, physicalMemoryPercentage(container1));
}
/** jvmargs given on a combined nodes tag must propagate to every container. */
@Test
public void testCombinedClusterWithJvmArgs() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "    <document-processing/>" +
            "    <nodes of='content1' jvmargs='testarg'/>" +
            "  </container>" +
            "  <content version='1.0' id='content1'>" +
            "    <redundancy>2</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(2);
    VespaModel model = modelTester.createModel(servicesXml, true);
    assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
    ContainerCluster container1 = model.getContainerClusters().get("container1");
    assertEquals("Nodes in container1", 2, container1.getContainers().size());
    for (Container container : container1.getContainers())
        assertTrue(container.getJvmArgs().contains("testarg"));
}
/** Several combined container/content cluster pairs can coexist, each sharing its own nodes. */
@Test
public void testMultipleCombinedClusters() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "    <nodes of='content1'/>" +
            "  </container>" +
            "  <container version='1.0' id='container2'>" +
            "    <nodes of='content2'/>" +
            "  </container>" +
            "  <content version='1.0' id='content1'>" +
            "    <redundancy>2</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='2'/>" +
            "  </content>" +
            "  <content version='1.0' id='content2'>" +
            "    <redundancy>2</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='3'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(5);
    VespaModel model = modelTester.createModel(servicesXml, true);
    assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
    assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
    assertEquals("Nodes in content2", 3, model.getContentClusters().get("content2").getRootGroup().getNodes().size());
    assertEquals("Nodes in container2", 3, model.getContainerClusters().get("container2").getContainers().size());
}
/** Referencing a non-existing service from nodes of='...' must fail with a clear message. */
@Test
public void testNonExistingCombinedClusterReference() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "    <nodes of='container2'/>" +
            "  </container>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(2);
    try {
        modelTester.createModel(servicesXml, true);
        fail("Expected exception");
    }
    catch (IllegalArgumentException expected) {
        assertEquals("container cluster 'container1' references service 'container2' but this service is not defined", expected.getMessage());
    }
}
/** nodes of='...' may only reference content clusters; referencing a container must fail. */
@Test
public void testInvalidCombinedClusterReference() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "    <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" +
            "  </container>" +
            "  <container version='1.0' id='container2'>" +
            "    <nodes count='2'/>" +
            "  </container>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(2);
    try {
        modelTester.createModel(servicesXml, true);
        fail("Expected exception");
    }
    catch (IllegalArgumentException expected) {
        assertEquals("container cluster 'container1' references service 'container2', but that is not a content service", expected.getMessage());
    }
}
/**
 * End-to-end check of the count/groups nodes attributes: container allocation, slobrok and
 * logserver placement, implicit cluster controllers, and the exact group/node/host layout
 * of two grouped content clusters.
 */
@Test
public void testUsingNodesAndGroupCountAttributes() {
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <container version='1.0' id='foo'>" +
            "    <nodes count='10'/>" +
            "  </container>" +
            "  <content version='1.0' id='bar'>" +
            "    <redundancy>2</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='27' groups='9'/>" +
            "  </content>" +
            "  <content version='1.0' id='baz'>" +
            "    <redundancy>1</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='27' groups='27'/>" +
            "  </content>" +
            "</services>";
    int numberOfHosts = 64;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, true);
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
    assertEquals(1, model.getContainerClusters().size());
    Set<com.yahoo.vespa.model.Host> containerHosts = model.getContainerClusters().get("foo").getContainers().stream().map(Container::getHost).collect(Collectors.toSet());
    assertEquals(10, containerHosts.size());
    // Admin services must be placed on container nodes in a hosted, multitenant system.
    Admin admin = model.getAdmin();
    Set<com.yahoo.vespa.model.Host> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
    assertEquals(3, slobrokHosts.size());
    assertTrue("Slobroks are assigned from container nodes", containerHosts.containsAll(slobrokHosts));
    assertTrue("Logserver is assigned from container nodes", containerHosts.contains(admin.getLogserver().getHost()));
    assertEquals("No in-cluster config servers in a hosted environment", 0, admin.getConfigservers().size());
    assertEquals("No admin cluster controller when multitenant", null, admin.getClusterControllers());
    // 'bar': 9 groups of 3 nodes; verify the derived cluster controllers and exact layout.
    ContentCluster cluster = model.getContentClusters().get("bar");
    ContainerCluster clusterControllers = cluster.getClusterControllers();
    assertEquals(3, clusterControllers.getContainers().size());
    assertEquals("bar-controllers", clusterControllers.getName());
    assertEquals("default28", clusterControllers.getContainers().get(0).getHostName());
    assertEquals("default31", clusterControllers.getContainers().get(1).getHostName());
    assertEquals("default54", clusterControllers.getContainers().get(2).getHostName());
    assertEquals(0, cluster.getRootGroup().getNodes().size());
    assertEquals(9, cluster.getRootGroup().getSubgroups().size());
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(3));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
    assertEquals("default54", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getDistributionKey(), is(2));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getConfigId(), is("bar/storage/2"));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(3));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(3));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/3"));
    assertEquals("default51", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(4));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/4"));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getDistributionKey(), is(5));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getConfigId(), is("bar/storage/5"));
    assertEquals("default48", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
    assertThat(cluster.getRootGroup().getSubgroups().get(8).getIndex(), is("8"));
    assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().size(), is(3));
    assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getDistributionKey(), is(24));
    assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getConfigId(), is("bar/storage/24"));
    assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getDistributionKey(), is(25));
    assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getConfigId(), is("bar/storage/25"));
    assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getDistributionKey(), is(26));
    assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getConfigId(), is("bar/storage/26"));
    // 'baz': 27 groups of 1 node.
    cluster = model.getContentClusters().get("baz");
    clusterControllers = cluster.getClusterControllers();
    assertEquals(3, clusterControllers.getContainers().size());
    assertEquals("baz-controllers", clusterControllers.getName());
    assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
    assertEquals("default02", clusterControllers.getContainers().get(1).getHostName());
    assertEquals("default27", clusterControllers.getContainers().get(2).getHostName());
    assertEquals(0, cluster.getRootGroup().getNodes().size());
    assertEquals(27, cluster.getRootGroup().getSubgroups().size());
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("baz/storage/0"));
    assertEquals("default27", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("baz/storage/1"));
    assertEquals("default26", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
    assertEquals("default25", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
    assertThat(cluster.getRootGroup().getSubgroups().get(26).getIndex(), is("26"));
    assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getDistributionKey(), is(26));
    assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getConfigId(), is("baz/storage/26"));
}
/**
 * 8 groups of a single node each: verifies cluster-controller placement, distribution bits,
 * and that groups are assigned hosts in descending host order.
 */
@Test
public void testGroupsOfSize1() {
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <container version='1.0' id='foo'>" +
            "    <nodes count='10'/>" +
            "  </container>" +
            "  <content version='1.0' id='bar'>" +
            "    <redundancy>1</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='8' groups='8'/>" +
            "  </content>" +
            "</services>";
    int numberOfHosts = 18;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, true);
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
    ContentCluster cluster = model.getContentClusters().get("bar");
    ContainerCluster clusterControllers = cluster.getClusterControllers();
    assertEquals(3, clusterControllers.getContainers().size());
    assertEquals("bar-controllers", clusterControllers.getName());
    assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
    assertEquals("default02", clusterControllers.getContainers().get(1).getHostName());
    assertEquals("default08", clusterControllers.getContainers().get(2).getHostName());
    assertEquals(0, cluster.getRootGroup().getNodes().size());
    assertEquals(8, cluster.getRootGroup().getSubgroups().size());
    assertEquals(8, cluster.distributionBits());
    // Groups are assigned hosts from the top: group 0 -> default08, ..., group 7 -> default01.
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
    assertEquals("default08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/1"));
    assertEquals("default07", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
    assertThat(cluster.getRootGroup().getSubgroups().get(7).getIndex(), is("7"));
    assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getDistributionKey(), is(7));
    assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getConfigId(), is("bar/storage/7"));
    assertEquals("default01", cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getHostName());
}
@Test
// Non-dedicated cluster controllers: <controllers><nodes dedicated='false' count='6'/>
// places the controllers on nodes already running other services, and the even
// requested count is reduced to the closest odd number (5) as asserted below.
public void testExplicitNonDedicatedClusterControllers() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"    <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"    <redundancy>2</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <controllers><nodes dedicated='false' count='6'/></controllers>" +
"    <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 19;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals( 8, cluster.distributionBits());
assertEquals("We get the closest odd number", 5, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
// Placement is deterministic: exact hosts for the 5 controllers ...
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default02", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default04", clusterControllers.getContainers().get(2).getHostName());
assertEquals("default05", clusterControllers.getContainers().get(3).getHostName());
assertEquals("default07", clusterControllers.getContainers().get(4).getHostName());
// ... and for (a sample of) the content nodes in each of the 3 groups.
assertEquals("default09", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertEquals("default08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getHostName());
assertEquals("default06", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertEquals("default03", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
}
@Test
public void testClusterControllersWithGroupSize2() {
    // 8 content nodes in 4 groups of 2, with no explicit <controllers> element:
    // the derived controller count is rounded to the closest odd number (3).
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <container version='1.0' id='foo'>" +
            "    <nodes count='10'/>" +
            "  </container>" +
            "  <content version='1.0' id='bar'>" +
            "    <redundancy>2</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='8' groups='4'/>" +
            "  </content>" +
            "</services>";
    int hostCount = 18;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContainerCluster controllers = vespaModel.getContentClusters().get("bar").getClusterControllers();
    assertEquals("We get the closest odd number", 3, controllers.getContainers().size());
    assertEquals("bar-controllers", controllers.getName());
    assertEquals("default01", controllers.getContainers().get(0).getHostName());
    assertEquals("default03", controllers.getContainers().get(1).getHostName());
    assertEquals("default08", controllers.getContainers().get(2).getHostName());
}
@Test
public void testClusterControllersCanSupplementWithAllContainerClusters() throws ParseException {
    // Two container clusters plus a tiny content cluster requesting 5 non-dedicated
    // controllers; with only 2 content nodes the controller cluster ends up with 1 container.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <container version='1.0' id='foo1'>" +
            "    <nodes count='2'/>" +
            "  </container>" +
            "  <container version='1.0' id='foo2'>" +
            "    <nodes count='1'/>" +
            "  </container>" +
            "  <content version='1.0' id='bar'>" +
            "    <redundancy>2</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <controllers><nodes dedicated='false' count='5'/></controllers>" +
            "    <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    int hostCount = 5;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContainerCluster controllers = vespaModel.getContentClusters().get("bar").getClusterControllers();
    assertEquals(1, controllers.getContainers().size());
}
@Test
// The hostnames passed as trailing createModel arguments ("default09", "default06",
// "default03") are treated as retired (per the assertion messages below); cluster
// controllers must be placed on other hosts.
public void testClusterControllersAreNotPlacedOnRetiredNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"    <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"    <redundancy>2</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 19;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true, "default09", "default06", "default03");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("Skipping retired default09", "default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals("Skipping retired default03", "default04", clusterControllers.getContainers().get(1).getHostName());
assertEquals("Skipping retired default06", "default08", clusterControllers.getContainers().get(2).getHostName());
}
@Test
// A retired node ("default09", passed as a trailing createModel argument) is kept in
// the slobrok cluster in addition to its 3 active members, giving 1+3 slobroks.
public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"    <nodes count='10'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 10;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true, "default09");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals("Includes retired node", 1+3, model.getAdmin().getSlobroks().size());
assertEquals("default01", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("default02", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("default10", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("Included in addition because it is retired", "default09", model.getAdmin().getSlobroks().get(3).getHostName());
}
@Test
// Same as the previous slobrok-retirement test, but with two retired nodes
// ("default09", "default08"); both are appended after the 3 active slobroks.
public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"    <nodes count='10'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 10;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true, "default09", "default08");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals("Includes retired node", 3+2, model.getAdmin().getSlobroks().size());
assertEquals("default01", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("default02", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("default10", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("Included in addition because it is retired", "default08", model.getAdmin().getSlobroks().get(3).getHostName());
assertEquals("Included in addition because it is retired", "default09", model.getAdmin().getSlobroks().get(4).getHostName());
}
@Test
// With two container clusters ('foo': 10 nodes, 'bar': 3 nodes) slobroks are placed in
// both; the retired nodes ("default12", "default03", "default02") are kept in addition
// to their replacements, giving 3+3 slobroks in total.
public void testSlobroksAreSpreadOverAllContainerClusters() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"    <nodes count='10'/>" +
"  </container>" +
"  <container version='1.0' id='bar'>" +
"    <nodes count='3'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 13;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true, "default12", "default03", "default02");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals("Includes retired node", 3+3, model.getAdmin().getSlobroks().size());
assertEquals("default04", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("default13", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("Included in addition because it is retired", "default12", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("default01", model.getAdmin().getSlobroks().get(3).getHostName());
assertEquals("Included in addition because it is retired", "default02", model.getAdmin().getSlobroks().get(4).getHostName());
assertEquals("Included in addition because it is retired", "default03", model.getAdmin().getSlobroks().get(5).getHostName());
}
@Test
public void testSlobroksAreSpreadOverAllContainerClustersExceptNodeAdmin() {
    // In the hosted-vespa routing application, slobroks must live only in the 'routing'
    // cluster, never in the 'node-admin' cluster.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <container version='1.0' id='routing'>" +
            "    <nodes count='10'/>" +
            "  </container>" +
            "  <container version='1.0' id='node-admin'>" +
            "    <nodes count='3'/>" +
            "  </container>" +
            "</services>";
    int hostCount = 13;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    modelTester.setApplicationId("hosted-vespa", "routing", "default");
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());

    Set<String> routingHosts = getClusterHostnames(vespaModel, "routing");
    Set<String> nodeAdminHosts = getClusterHostnames(vespaModel, "node-admin");
    assertEquals(10, routingHosts.size());
    assertEquals(3, nodeAdminHosts.size());

    Set<String> slobrokHosts = vespaModel.getAdmin().getSlobroks().stream()
                                         .map(AbstractService::getHostName)
                                         .collect(Collectors.toSet());
    assertEquals(3, slobrokHosts.size());
    assertThat(slobrokHosts, everyItem(isIn(routingHosts)));
    assertThat(slobrokHosts, everyItem(not(isIn(nodeAdminHosts))));
}
/** Returns the hostnames of all hosts running at least one service whose 'clustername' property equals the given id. */
private Set<String> getClusterHostnames(VespaModel model, String clusterId) {
    Optional<String> wantedCluster = Optional.of(clusterId);
    return model.getHosts().stream()
                .filter(host -> host.getServices().stream()
                                    .map(service -> service.getProperty("clustername"))
                                    .anyMatch(cluster -> Objects.equals(cluster, wantedCluster)))
                .map(HostInfo::getHostname)
                .collect(Collectors.toSet());
}
@Test
public void test2ContentNodesProduces1ClusterController() {
    // A bare 2-node content cluster (no container clusters, no <controllers> element)
    // gets exactly one cluster controller.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <content version='1.0' id='bar'>" +
            "    <redundancy>2</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    int hostCount = 2;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContainerCluster controllers = vespaModel.getContentClusters().get("bar").getClusterControllers();
    assertEquals(1, controllers.getContainers().size());
}
@Test
public void test2ContentNodesWithContainerClusterProducesMixedClusterControllerCluster() throws ParseException {
    // A 2-node content cluster next to a container cluster still gets a single
    // cluster controller container.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <container version='1.0' id='foo'>" +
            "    <nodes count='3'/>" +
            "  </container>" +
            "  <content version='1.0' id='bar'>" +
            "    <redundancy>2</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(5);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    ContainerCluster controllers = vespaModel.getContentClusters().get("bar").getClusterControllers();
    assertEquals(1, controllers.getContainers().size());
}
@Ignore
@Test
// NOTE(review): this ignored test is internally inconsistent — it asserts that
// clusterControllers1 has exactly 1 container, then reads containers at index 1 and 2.
// Presumably it describes intended (unimplemented) behavior where each content cluster's
// controller cluster is supplemented from distinct container nodes; confirm the intent
// before un-ignoring.
public void test2ContentNodesOn2ClustersWithContainerClusterProducesMixedClusterControllerCluster() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container version='1.0' id='container'>" +
"    <nodes count='3' flavor='container-node'/>" +
"  </container>" +
"  <content version='1.0' id='content1'>" +
"    <redundancy>2</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes count='2' flavor='content1-node'/>" +
"  </content>" +
"  <content version='1.0' id='content2'>" +
"    <redundancy>2</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes count='2' flavor='content2-node'/>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts("container-node", 3);
tester.addHosts("content1-node", 2);
tester.addHosts("content2-node", 2);
VespaModel model = tester.createModel(services, true);
ContentCluster cluster1 = model.getContentClusters().get("content1");
ContainerCluster clusterControllers1 = cluster1.getClusterControllers();
assertEquals(1, clusterControllers1.getContainers().size());
assertEquals("content1-node0", clusterControllers1.getContainers().get(0).getHostName());
assertEquals("content1-node1", clusterControllers1.getContainers().get(1).getHostName());
assertEquals("container-node0", clusterControllers1.getContainers().get(2).getHostName());
ContentCluster cluster2 = model.getContentClusters().get("content2");
ContainerCluster clusterControllers2 = cluster2.getClusterControllers();
assertEquals(3, clusterControllers2.getContainers().size());
assertEquals("content2-node0", clusterControllers2.getContainers().get(0).getHostName());
assertEquals("content2-node1", clusterControllers2.getContainers().get(1).getHostName());
assertEquals("We do not pick the container used to supplement another cluster",
"container-node1", clusterControllers2.getContainers().get(2).getHostName());
}
@Test
// Dedicated cluster controllers (<nodes dedicated='true' count='4'/>): the requested
// count is used as-is — note no rounding to an odd number here, unlike the
// non-dedicated cases above.
public void testExplicitDedicatedClusterControllers() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container version='1.0' id='foo'>" +
"    <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"    <redundancy>2</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <controllers><nodes dedicated='true' count='4'/></controllers>" +
"    <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
// 10 container + 9 content + 4 dedicated controller nodes
int numberOfHosts = 23;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(4, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default04", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default03", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default02", clusterControllers.getContainers().get(2).getHostName());
assertEquals("default01", clusterControllers.getContainers().get(3).getHostName());
}
@Test
// Requests 24 nodes in 3 groups but only 6 hosts exist. The model is still built
// (second createModel argument is false here, unlike most tests — presumably meaning
// "don't fail on insufficient capacity"; confirm against the createModel overload) and
// the cluster is scaled down to 3 groups of 2 nodes with correspondingly reduced
// redundancy: 2 copies per group * 3 groups.
public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"    <redundancy reply-after='3'>4</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes count='24' groups='3'/>" +
"    <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
"  </content>" +
"</services>";
int numberOfHosts = 6;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(2*3, cluster.redundancy().effectiveReadyCopies());
assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get());
// All nodes live in subgroups; the root group itself holds none.
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(3, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/2"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/3"));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getIndex(), is("2"));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().size(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getDistributionKey(), is(4));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getConfigId(), is("bar/storage/4"));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getDistributionKey(), is(5));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getConfigId(), is("bar/storage/5"));
}
@Test
// Requests 24 flat nodes but only 4 hosts exist. The cluster is scaled down to the 4
// available nodes, and redundancy (12), searchable-copies (5) and dispatch groups (7)
// are all capped at 4 accordingly.
public void testUsingNodesCountAttributesAndGettingTooFewNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"    <redundancy reply-after='8'>12</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes count='24'/>" +
"    <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
"    <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
"  </content>" +
"</services>";
int numberOfHosts = 4;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(4, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(4, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(4, cluster.redundancy().effectiveReadyCopies());
assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
// Flat cluster: all nodes directly under the root group, no partitions/subgroups.
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(4, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getNodes().size(), is(4));
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertThat(cluster.getRootGroup().getNodes().get(1).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getNodes().get(1).getConfigId(), is("bar/storage/1"));
assertThat(cluster.getRootGroup().getNodes().get(2).getDistributionKey(), is(2));
assertThat(cluster.getRootGroup().getNodes().get(2).getConfigId(), is("bar/storage/2"));
assertThat(cluster.getRootGroup().getNodes().get(3).getDistributionKey(), is(3));
assertThat(cluster.getRootGroup().getNodes().get(3).getConfigId(), is("bar/storage/3"));
}
@Test
// Requests 24 nodes in 3 groups but only 1 host exists: everything collapses to a
// single-node, single-group cluster with redundancy 1 and one cluster controller.
public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"    <redundancy reply-after='3'>4</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes count='24' groups='3'/>" +
"    <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
"  </content>" +
"</services>";
int numberOfHosts = 1;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(1, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(1, cluster.redundancy().effectiveReadyCopies());
// With one node there is no group structure at all.
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(1, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0"));
}
@Test(expected = IllegalArgumentException.class)
public void testRequiringMoreNodesThanAreAvailable() throws ParseException {
    // 3 nodes are marked required='true' but only 2 hosts exist: model building must fail.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <content version='1.0' id='bar'>" +
            "    <redundancy>1</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='3' required='true'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(2);
    modelTester.createModel(servicesXml, false);
}
@Test
// Requests 24 flat nodes but only 1 host exists: redundancy, ready copies and the
// number of dispatch groups are all capped at 1.
public void testUsingNodesCountAttributesAndGettingJustOneNode() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"    <redundancy reply-after='8'>12</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes count='24'/>" +
"    <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
"    <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
"  </content>" +
"</services>";
int numberOfHosts = 1;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(1, cluster.redundancy().effectiveReadyCopies());
assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(1, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0"));
}
@Test
// Every service type requests a specific node flavor; with host counts exactly matching
// each requested flavor, all 23 hosts end up in the model.
public void testRequestingSpecificFlavors() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'>" +
"    <logservers><nodes count='1' dedicated='true' flavor='logserver-flavor'/></logservers>" +
"    <slobroks><nodes count='2' dedicated='true' flavor='slobrok-flavor'/></slobroks>" +
"  </admin>" +
"  <container version='1.0' id='container'>" +
"    <nodes count='4' flavor='container-flavor'/>" +
"  </container>" +
"  <content version='1.0' id='foo'>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <controllers><nodes count='2' dedicated='true' flavor='controller-foo-flavor'/></controllers>" +
"    <nodes count='5' flavor='content-foo-flavor'/>" +
"  </content>" +
"  <content version='1.0' id='bar'>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <controllers><nodes count='3' dedicated='true' flavor='controller-bar-flavor'/></controllers>" +
"    <nodes count='6' flavor='content-bar-flavor'/>" +
"  </content>" +
"</services>";
// 1 + 2 + 4 + 2 + 5 + 3 + 6 = 23
int totalHosts = 23;
VespaModelTester tester = new VespaModelTester();
tester.addHosts("logserver-flavor", 1);
tester.addHosts("slobrok-flavor", 2);
tester.addHosts("container-flavor", 4);
tester.addHosts("controller-foo-flavor", 2);
tester.addHosts("content-foo-flavor", 5);
tester.addHosts("controller-bar-flavor", 3);
tester.addHosts("content-bar-flavor", 6);
// NOTE(review): trailing argument 0 — presumably a retired/spare host count; confirm
// against the createModel overload used here.
VespaModel model = tester.createModel(services, true, 0);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(totalHosts));
}
@Test
public void testJDiscOnly() {
    // services.xml consisting of a single <jdisc> root element (no <services> wrapper).
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<jdisc version='1.0'>" +
            "  <search/>" +
            "  <nodes count='3'/>" +
            "</jdisc>";
    int hostCount = 3;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(3, vespaModel.getContainerClusters().get("jdisc").getContainers().size());
    assertNotNull(vespaModel.getAdmin().getLogserver());
    assertEquals(3, vespaModel.getAdmin().getSlobroks().size());
}
@Test
public void testUsingHostaliasWithProvisioner() {
    // hostalias-based node references still resolve when a host provisioner is in use.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "<admin version='2.0'>" +
            "  <adminserver hostalias='node1'/>\n"+
            "</admin>\n" +
            "<jdisc id='mydisc' version='1.0'>" +
            "  <handler id='myHandler'>" +
            "    <component id='injected' />" +
            "  </handler>" +
            "  <nodes>" +
            "    <node hostalias='node1'/>" +
            "  </nodes>" +
            "</jdisc>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
}
@Test
public void testThatStandaloneSyntaxWorksOnHostedVespa() {
    // A standalone-style <jdisc> with an explicit http server port builds on hosted Vespa.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<jdisc id='foo' version='1.0'>" +
            "  <http>" +
            "    <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" +
            "  </http>" +
            "</jdisc>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getHosts().size());
    assertEquals(1, vespaModel.getContainerClusters().size());
}
@Test
public void testNoNodeTagMeans1Node() {
    // Omitting <nodes> in both the container and the content cluster yields one node each.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <jdisc id='foo' version='1.0'>" +
            "    <search/>" +
            "    <document-api/>" +
            "  </jdisc>" +
            "  <content version='1.0' id='bar'>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
    assertEquals(1, vespaModel.getContainerClusters().get("foo").getContainers().size());
    assertEquals(1, vespaModel.getContentClusters().get("bar").getRootGroup().countNodes());
}
@Test
public void testNoNodeTagMeans1NodeNoContent() {
    // A lone container cluster without a <nodes> tag gets exactly one node.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <jdisc id='foo' version='1.0'>" +
            "    <search/>" +
            "    <document-api/>" +
            "  </jdisc>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
    assertEquals(1, vespaModel.getContainerClusters().get("foo").getContainers().size());
}
@Test
public void testNoNodeTagMeans1NodeNonHosted() {
    // Same as testNoNodeTagMeans1Node, but with hosted mode switched off.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <jdisc id='foo' version='1.0'>" +
            "    <search/>" +
            "    <document-api/>" +
            "  </jdisc>" +
            "  <content version='1.0' id='bar'>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.setHosted(false);
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
    assertEquals(1, vespaModel.getContainerClusters().get("foo").getContainers().size());
    assertEquals(1, vespaModel.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size());
}
@Test
public void testSingleNodeNonHosted() {
    // Non-hosted mode with explicit single-node <nodes> tags in both clusters.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <jdisc id='foo' version='1.0'>" +
            "    <search/>" +
            "    <document-api/>" +
            "    <nodes><node hostalias='foo'/></nodes>"+
            "  </jdisc>" +
            "  <content version='1.0' id='bar'>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.setHosted(false);
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
    assertEquals(1, vespaModel.getContainerClusters().get("foo").getContainers().size());
    assertEquals(1, vespaModel.getContentClusters().get("bar").getRootGroup().countNodes());
}
/** Recreate the combination used in some factory tests */
@Test
// Multitenant but non-provisioned (no hosts file): all services share the single host;
// the content cluster gets 2 nodes (both on that host) and 1 cluster controller.
public void testMultitenantButNotHosted() {
String services =
"<?xml version='1.0' encoding='UTF-8' ?>" +
"<services version='1.0'>" +
"  <admin version='2.0'>" +
"    <adminserver hostalias='node1'/>" +
"  </admin>" +
"  <jdisc id='default' version='1.0'>" +
"    <search/>" +
"    <nodes>" +
"      <node hostalias='node1'/>" +
"    </nodes>" +
"  </jdisc>" +
"  <content id='storage' version='1.0'>" +
"    <redundancy>2</redundancy>" +
"    <group>" +
"      <node distribution-key='0' hostalias='node1'/>" +
"      <node distribution-key='1' hostalias='node1'/>" +
"    </group>" +
"    <tuning>" +
"      <cluster-controller>" +
"        <transition-time>0</transition-time>" +
"      </cluster-controller>" +
"    </tuning>" +
"    <documents>" +
"      <document mode='store-only' type='type1'/>" +
"    </documents>" +
"    <engine>" +
"      <proton/>" +
"    </engine>" +
"  </content>" +
"  </services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(1));
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(2, content.getRootGroup().getNodes().size());
ContainerCluster controller = content.getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
@Test
@Test
public void testMultitenantButNotHostedSharedContentNode() {
String services =
"<?xml version='1.0' encoding='UTF-8' ?>" +
"<services version='1.0'>" +
" <admin version='2.0'>" +
" <adminserver hostalias='node1'/>" +
" </admin>" +
" <jdisc id='default' version='1.0'>" +
" <search/>" +
" <nodes>" +
" <node hostalias='node1'/>" +
" </nodes>" +
" </jdisc>" +
" <content id='storage' version='1.0'>" +
" <redundancy>2</redundancy>" +
" <group>" +
" <node distribution-key='0' hostalias='node1'/>" +
" <node distribution-key='1' hostalias='node1'/>" +
" </group>" +
" <tuning>" +
" <cluster-controller>" +
" <transition-time>0</transition-time>" +
" </cluster-controller>" +
" </tuning>" +
" <documents>" +
" <document mode='store-only' type='type1'/>" +
" </documents>" +
" <engine>" +
" <proton/>" +
" </engine>" +
" </content>" +
" <content id='search' version='1.0'>" +
" <redundancy>2</redundancy>" +
" <group>" +
" <node distribution-key='0' hostalias='node1'/>" +
" </group>" +
" <documents>" +
" <document type='type1'/>" +
" </documents>" +
" </content>" +
" </services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(1));
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(2, content.getRootGroup().getNodes().size());
ContainerCluster controller = content.getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
private VespaModel createNonProvisionedMultitenantModel(String services) {
return createNonProvisionedModel(true, null, services);
}
private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) {
VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(hosts, services, ApplicationPackageUtils.generateSearchDefinition("type1"));
ApplicationPackage appPkg = modelCreatorWithMockPkg.appPkg;
DeployState deployState = new DeployState.Builder().applicationPackage(appPkg).
properties((new DeployProperties.Builder()).multitenant(multitenant).build()).
build(true);
return modelCreatorWithMockPkg.create(false, deployState);
}
@Test
public void testThatTldConfigIdsAreDeterministic() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <jdisc version='1.0' id='jdisc0'>" +
" <search/>" +
" <nodes count='2'/>" +
" </jdisc>" +
" <jdisc version='1.0' id='jdisc1'>" +
" <search/>" +
" <nodes count='2'/>" +
" </jdisc>" +
" <content version='1.0' id='content0'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
int numberOfHosts = 8;
{
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
Map<String, ContentCluster> contentClusters = model.getContentClusters();
assertEquals(2, contentClusters.size());
checkThatTldAndContainerRunningOnSameHostHaveSameId(
model.getContainerClusters().values(),
model.getContentClusters().values(),
0);
}
{
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts + 1);
VespaModel model = tester.createModel(services, true, 1, "default0");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
Map<String, ContentCluster> contentClusters = model.getContentClusters();
assertEquals(2, contentClusters.size());
checkThatTldAndContainerRunningOnSameHostHaveSameId(
model.getContainerClusters().values(),
model.getContentClusters().values(),
1);
}
}
private void checkThatTldAndContainerRunningOnSameHostHaveSameId(Collection<ContainerCluster> containerClusters,
Collection<ContentCluster> contentClusters,
int startIndexForContainerIds) {
for (ContentCluster contentCluster : contentClusters) {
String contentClusterName = contentCluster.getName();
int i = 0;
for (ContainerCluster containerCluster : containerClusters) {
String containerClusterName = containerCluster.getName();
for (int j = 0; j < 2; j++) {
Dispatch tld = contentCluster.getSearch().getIndexed().getTLDs().get(2 * i + j);
Container container = containerCluster.getContainers().get(j);
int containerConfigIdIndex = j + startIndexForContainerIds;
assertEquals(container.getHostName(), tld.getHostname());
assertEquals(contentClusterName + "/search/cluster." + contentClusterName + "/tlds/" +
containerClusterName + "." + containerConfigIdIndex + ".tld." + containerConfigIdIndex,
tld.getConfigId());
assertEquals(containerClusterName + "/" + "container." + containerConfigIdIndex,
container.getConfigId());
}
i++;
}
}
}
private int physicalMemoryPercentage(ContainerCluster cluster) {
QrStartConfig.Builder b = new QrStartConfig.Builder();
cluster.getSearch().getConfig(b);
return new QrStartConfig(b).jvm().heapSizeAsPercentageOfPhysicalMemory();
}
@Test
public void require_that_proton_config_is_tuned_based_on_node_flavor() {
String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
"<services>",
" <content version='1.0' id='test'>",
" <documents>",
" <document type='type1' mode='index'/>",
" </documents>",
" <nodes count='2' flavor='content-test-flavor'/>",
" </content>",
"</services>");
VespaModelTester tester = new VespaModelTester();
tester.addHosts(createFlavorFromDiskSetting("content-test-flavor", false), 2);
VespaModel model = tester.createModel(services, true, 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
assertEquals(2, cluster.getSearchNodes().size());
assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001);
assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001);
}
private static Flavor createFlavorFromDiskSetting(String name, boolean fastDisk) {
return new Flavor(new FlavorsConfig.Flavor(new FlavorsConfig.Flavor.Builder().
name(name).fastDisk(fastDisk)));
}
private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
List<SearchNode> searchNodes = cluster.getSearchNodes();
assertTrue(searchNodeIdx < searchNodes.size());
searchNodes.get(searchNodeIdx).getConfig(builder);
return new ProtonConfig(builder);
}
@Test
public void require_that_config_override_and_explicit_proton_tuning_have_precedence_over_default_node_flavor_tuning() {
String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
"<services>",
" <content version='1.0' id='test'>",
" <config name='vespa.config.search.core.proton'>",
" <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>",
" </config>",
" <documents>",
" <document type='type1' mode='index'/>",
" </documents>",
" <nodes count='1' flavor='content-test-flavor'/>",
" <engine>",
" <proton>",
" <tuning>",
" <searchnode>",
" <flushstrategy>",
" <native>",
" <total>",
" <maxmemorygain>1000</maxmemorygain>",
" </total>",
" </native>",
" </flushstrategy>",
" </searchnode>",
" </tuning>",
" </proton>",
" </engine>",
" </content>",
"</services>");
VespaModelTester tester = new VespaModelTester();
tester.addHosts("default", 1);
tester.addHosts(createFlavorFromMemoryAndDisk("content-test-flavor", 128, 100), 1);
VespaModel model = tester.createModel(services, true, 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId());
assertEquals(2000, cfg.flush().memory().maxtlssize());
assertEquals(1000, cfg.flush().memory().maxmemory());
assertEquals((long) 16 * GB, cfg.flush().memory().each().maxmemory());
}
private static long GB = 1024 * 1024 * 1024;
private static Flavor createFlavorFromMemoryAndDisk(String name, int memoryGb, int diskGb) {
return new Flavor(new FlavorsConfig.Flavor(new FlavorsConfig.Flavor.Builder().
name(name).minMainMemoryAvailableGb(memoryGb).minDiskAvailableGb(diskGb)));
}
private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
model.getConfig(builder, configId);
return new ProtonConfig(builder);
}
} |
The newlines are actually unnecessary ... I didn't type them though: IntelliJ automatically makes this code when you paste a multilevel string, which is what I did here (source was a gh issue). | public void testSharedNodesNotHosted() {
String hosts =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<hosts>\n" +
" <host name=\"vespa-1\">\n" +
" <alias>vespa-1</alias>\n" +
" </host>\n" +
" <host name=\"vespa-2\">\n" +
" <alias>vespa-2</alias>\n" +
" </host>\n" +
" <host name=\"vespa-3\">\n" +
" <alias>vespa-3</alias>\n" +
" </host>\n" +
"</hosts>";
String services =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<services version=\"1.0\">\n" +
"\n" +
" <admin version=\"2.0\">\n" +
" <adminserver hostalias=\"vespa-1\"/>\n" +
" <configservers>\n" +
" <configserver hostalias=\"vespa-1\"/>\n" +
" </configservers>\n" +
" </admin>\n" +
"\n" +
" <container id=\"container\" version=\"1.0\">\n" +
" <document-processing/>\n" +
" <document-api/>\n" +
" <search/>\n" +
" <nodes jvmargs=\"-Xms512m -Xmx512m\">\n" +
" <node hostalias=\"vespa-1\"/>\n" +
" <node hostalias=\"vespa-2\"/>\n" +
" <node hostalias=\"vespa-3\"/>\n" +
" </nodes>\n" +
" </container>\n" +
"\n" +
" <content id=\"storage\" version=\"1.0\">\n" +
" <search>\n" +
" <visibility-delay>1.0</visibility-delay>\n" +
" </search>\n" +
" <redundancy>2</redundancy>\n" +
" <documents>\n" +
" <document type=\"type1\" mode=\"index\"/>\n" +
" <document-processing cluster=\"container\"/>\n" +
" </documents>\n" +
" <nodes>\n" +
" <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
" <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" +
" <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" +
" </nodes>\n" +
" </content>\n" +
"\n" +
"</services>";
VespaModel model = createNonProvisionedModel(false, hosts, services);
assertEquals(3, model.getRoot().getHostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(3, content.getRootGroup().getNodes().size());
} | "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + | public void testSharedNodesNotHosted() {
String hosts =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<hosts>\n" +
" <host name=\"vespa-1\">\n" +
" <alias>vespa-1</alias>\n" +
" </host>\n" +
" <host name=\"vespa-2\">\n" +
" <alias>vespa-2</alias>\n" +
" </host>\n" +
" <host name=\"vespa-3\">\n" +
" <alias>vespa-3</alias>\n" +
" </host>\n" +
"</hosts>";
String services =
"<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
"<services version=\"1.0\">\n" +
"\n" +
" <admin version=\"2.0\">\n" +
" <adminserver hostalias=\"vespa-1\"/>\n" +
" <configservers>\n" +
" <configserver hostalias=\"vespa-1\"/>\n" +
" </configservers>\n" +
" </admin>\n" +
"\n" +
" <container id=\"container\" version=\"1.0\">\n" +
" <document-processing/>\n" +
" <document-api/>\n" +
" <search/>\n" +
" <nodes jvmargs=\"-Xms512m -Xmx512m\">\n" +
" <node hostalias=\"vespa-1\"/>\n" +
" <node hostalias=\"vespa-2\"/>\n" +
" <node hostalias=\"vespa-3\"/>\n" +
" </nodes>\n" +
" </container>\n" +
"\n" +
" <content id=\"storage\" version=\"1.0\">\n" +
" <search>\n" +
" <visibility-delay>1.0</visibility-delay>\n" +
" </search>\n" +
" <redundancy>2</redundancy>\n" +
" <documents>\n" +
" <document type=\"type1\" mode=\"index\"/>\n" +
" <document-processing cluster=\"container\"/>\n" +
" </documents>\n" +
" <nodes>\n" +
" <node hostalias=\"vespa-1\" distribution-key=\"0\"/>\n" +
" <node hostalias=\"vespa-2\" distribution-key=\"1\"/>\n" +
" <node hostalias=\"vespa-3\" distribution-key=\"2\"/>\n" +
" </nodes>\n" +
" </content>\n" +
"\n" +
"</services>";
VespaModel model = createNonProvisionedModel(false, hosts, services);
assertEquals(3, model.getRoot().getHostSystem().getHosts().size());
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(3, content.getRootGroup().getNodes().size());
} | class ModelProvisioningTest {
@Test
public void testNodeCountForJdisc() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>\n" +
"\n" +
"<admin version='3.0'><nodes count='1' /></admin>\n" +
"<jdisc id='mydisc' version='1.0'>" +
" <handler id='myHandler'>" +
" <component id='injected' />" +
" </handler>" +
" <nodes count=\"3\"/>" +
"</jdisc>" +
"<jdisc id='mydisc2' version='1.0'>" +
" <document-processing/>" +
" <handler id='myHandler'>" +
" <component id='injected' />" +
" </handler>" +
" <nodes count='2' allocated-memory='45%' jvmargs='-verbosegc' preload='lib/blablamalloc.so'/>" +
"</jdisc>" +
"</services>";
String hosts ="<hosts>"
+ " <host name='myhost0'>"
+ " <alias>node0</alias>"
+ " </host>"
+ " <host name='myhost1'>"
+ " <alias>node1</alias>"
+ " </host>"
+ " <host name='myhost2'>"
+ " <alias>node2</alias>"
+ " </host>"
+ " <host name='myhost3'>"
+ " <alias>node3</alias>"
+ " </host>"
+ " <host name='myhost4'>"
+ " <alias>node4</alias>"
+ " </host>"
+ " <host name='myhost5'>"
+ " <alias>node5</alias>"
+ " </host>"
+ "</hosts>";
VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services);
VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true)));
assertThat(model.getContainerClusters().get("mydisc").getContainers().size(), is(3));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getConfigId(), is("mydisc/container.0"));
assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(0).isInitialized());
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getConfigId(), is("mydisc/container.1"));
assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(1).isInitialized());
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getConfigId(), is("mydisc/container.2"));
assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(2).isInitialized());
assertThat(model.getContainerClusters().get("mydisc2").getContainers().size(), is(2));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getConfigId(), is("mydisc2/container.0"));
assertTrue(model.getContainerClusters().get("mydisc2").getContainers().get(0).isInitialized());
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getConfigId(), is("mydisc2/container.1"));
assertTrue(model.getContainerClusters().get("mydisc2").getContainers().get(1).isInitialized());
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getJvmArgs(), is(""));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getJvmArgs(), is(""));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getJvmArgs(), is(""));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(model.getContainerClusters().get("mydisc").getMemoryPercentage(), is(Optional.empty()));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getJvmArgs(), is("-verbosegc"));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getJvmArgs(), is("-verbosegc"));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getPreLoad(), is("lib/blablamalloc.so"));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getPreLoad(), is("lib/blablamalloc.so"));
assertThat(model.getContainerClusters().get("mydisc2").getMemoryPercentage(), is(Optional.of(45)));
HostSystem hostSystem = model.getHostSystem();
assertNotNull(hostSystem.getHostByHostname("myhost0"));
assertNotNull(hostSystem.getHostByHostname("myhost1"));
assertNotNull(hostSystem.getHostByHostname("myhost2"));
assertNotNull(hostSystem.getHostByHostname("myhost3"));
assertNull(hostSystem.getHostByHostname("Nope"));
}
@Test
public void testNodeCountForContentGroup() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"\n" +
" <admin version='3.0'>" +
" <nodes count='3'/>" +
" </admin>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
int numberOfHosts = 2;
tester.addHosts(numberOfHosts);
int numberOfContentNodes = 2;
VespaModel model = tester.createModel(xmlWithNodes, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
final Map<String, ContentCluster> contentClusters = model.getContentClusters();
ContentCluster cluster = contentClusters.get("bar");
assertThat(cluster.getRootGroup().getNodes().size(), is(numberOfContentNodes));
int i = 0;
for (StorageNode node : cluster.getRootGroup().getNodes())
assertEquals(i++, node.getDistributionKey());
}
@Test
public void testSeparateClusters() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes count='1'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(3);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 1, model.getContainerClusters().get("container1").getContainers().size());
assertEquals("Heap size for container", 60, physicalMemoryPercentage(model.getContainerClusters().get("container1")));
}
@Test
public void testClusterMembership() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes count='1'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals(1, model.getHostSystem().getHosts().size());
HostResource host = model.getHostSystem().getHosts().iterator().next();
assertEquals(1, host.clusterMemberships().size());
ClusterMembership membership = host.clusterMemberships().iterator().next();
assertEquals("container", membership.cluster().type().name());
assertEquals("container1", membership.cluster().id().value());
}
@Test
public void testCombinedCluster() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <search/>" +
" <nodes of='content1'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
assertEquals("Heap size is lowered with combined clusters",
17, physicalMemoryPercentage(model.getContainerClusters().get("container1")));
}
@Test
public void testCombinedClusterWithJvmArgs() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <document-processing/>" +
" <nodes of='content1' jvmargs='testarg'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
for (Container container : model.getContainerClusters().get("container1").getContainers())
assertTrue(container.getJvmArgs().contains("testarg"));
}
@Test
public void testMultipleCombinedClusters() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes of='content1'/>" +
" </container>" +
" <container version='1.0' id='container2'>" +
" <nodes of='content2'/>" +
" </container>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
" <content version='1.0' id='content2'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='3'/>" +
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(5);
VespaModel model = tester.createModel(xmlWithNodes, true);
assertEquals("Nodes in content1", 2, model.getContentClusters().get("content1").getRootGroup().getNodes().size());
assertEquals("Nodes in container1", 2, model.getContainerClusters().get("container1").getContainers().size());
assertEquals("Nodes in content2", 3, model.getContentClusters().get("content2").getRootGroup().getNodes().size());
assertEquals("Nodes in container2", 3, model.getContainerClusters().get("container2").getContainers().size());
}
@Test
public void testNonExistingCombinedClusterReference() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes of='container2'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
try {
tester.createModel(xmlWithNodes, true);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("container cluster 'container1' references service 'container2' but this service is not defined", e.getMessage());
}
}
@Test
public void testInvalidCombinedClusterReference() {
String xmlWithNodes =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
" <container version='1.0' id='container1'>" +
" <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" +
" </container>" +
" <container version='1.0' id='container2'>" +
" <nodes count='2'/>" +
" </container>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(2);
try {
tester.createModel(xmlWithNodes, true);
fail("Expected exception");
}
catch (IllegalArgumentException e) {
assertEquals("container cluster 'container1' references service 'container2', but that is not a content service", e.getMessage());
}
}
@Test
public void testUsingNodesAndGroupCountAttributes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <container version='1.0' id='foo'>" +
" <nodes count='10'/>" +
" </container>" +
" <content version='1.0' id='bar'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='9'/>" +
" </content>" +
" <content version='1.0' id='baz'>" +
" <redundancy>1</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='27' groups='27'/>" +
" </content>" +
"</services>";
int numberOfHosts = 64;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals(1, model.getContainerClusters().size());
Set<com.yahoo.vespa.model.Host> containerHosts = model.getContainerClusters().get("foo").getContainers().stream().map(Container::getHost).collect(Collectors.toSet());
assertEquals(10, containerHosts.size());
Admin admin = model.getAdmin();
Set<com.yahoo.vespa.model.Host> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
assertTrue("Slobroks are assigned from container nodes", containerHosts.containsAll(slobrokHosts));
assertTrue("Logserver is assigned from container nodes", containerHosts.contains(admin.getLogserver().getHost()));
assertEquals("No in-cluster config servers in a hosted environment", 0, admin.getConfigservers().size());
assertEquals("No admin cluster controller when multitenant", null, admin.getClusterControllers());
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default28", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default31", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default54", clusterControllers.getContainers().get(2).getHostName());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(9, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertEquals("default54", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getDistributionKey(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getConfigId(), is("bar/storage/2"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/3"));
assertEquals("default51", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(4));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/4"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getDistributionKey(), is(5));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getConfigId(), is("bar/storage/5"));
assertEquals("default48", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(8).getIndex(), is("8"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().size(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getDistributionKey(), is(24));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getConfigId(), is("bar/storage/24"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getDistributionKey(), is(25));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getConfigId(), is("bar/storage/25"));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getDistributionKey(), is(26));
assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getConfigId(), is("bar/storage/26"));
cluster = model.getContentClusters().get("baz");
clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("baz-controllers", clusterControllers.getName());
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default02", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default27", clusterControllers.getContainers().get(2).getHostName());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(27, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("baz/storage/0"));
assertEquals("default27", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("baz/storage/1"));
assertEquals("default26", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertEquals("default25", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(26).getIndex(), is("26"));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getDistributionKey(), is(26));
assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getConfigId(), is("baz/storage/26"));
}
/**
 * 8 content nodes in 8 groups: every subgroup holds exactly one storage node.
 * Also checks cluster controller count/placement and that distribution bits
 * match the group count (8).
 */
@Test
public void testGroupsOfSize1() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>1</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='8' groups='8'/>" +
"  </content>" +
"</services>";
// 10 container hosts + 8 content hosts = 18
int numberOfHosts = 18;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
// 3 controllers, spread over distinct hosts
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default02", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default08", clusterControllers.getContainers().get(2).getHostName());
// All nodes live in subgroups; the root group itself has none
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(8, cluster.getRootGroup().getSubgroups().size());
assertEquals(8, cluster.distributionBits());
// Each subgroup gets one node; hosts are assigned in descending name order
// (default08 to group 0 ... default01 to group 7), per the assertions below
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertEquals("default08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/1"));
assertEquals("default07", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertThat(cluster.getRootGroup().getSubgroups().get(7).getIndex(), is("7"));
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getDistributionKey(), is(7));
assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getConfigId(), is("bar/storage/7"));
assertEquals("default01", cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getHostName());
}
/**
 * Explicitly requesting 6 non-dedicated cluster controllers yields the closest
 * odd number (5), placed on hosts shared with other services rather than on
 * dedicated controller hosts.
 */
@Test
public void testExplicitNonDedicatedClusterControllers() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes dedicated='false' count='6'/></controllers>" +
"     <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
// 10 container hosts + 9 content hosts = 19
int numberOfHosts = 19;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals( 8, cluster.distributionBits());
// An even controller count is rounded to an odd one to allow majority quorum
assertEquals("We get the closest odd number", 5, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default02", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default04", clusterControllers.getContainers().get(2).getHostName());
assertEquals("default05", clusterControllers.getContainers().get(3).getHostName());
assertEquals("default07", clusterControllers.getContainers().get(4).getHostName());
// Content groups are filled from the highest-numbered hosts downwards
assertEquals("default09", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertEquals("default08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getHostName());
assertEquals("default06", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertEquals("default03", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
}
/**
 * 8 content nodes in 4 groups of 2: the implicit cluster controller cluster
 * gets the closest odd number of members (3), each on a separate host.
 */
@Test
public void testClusterControllersWithGroupSize2() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <container version='1.0' id='foo'>" +
            "     <nodes count='10'/>" +
            "  </container>" +
            "  <content version='1.0' id='bar'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='8' groups='4'/>" +
            "  </content>" +
            "</services>";
    int hostCount = 18; // 10 container + 8 content hosts
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContentCluster contentCluster = vespaModel.getContentClusters().get("bar");
    ContainerCluster controllers = contentCluster.getClusterControllers();
    assertEquals("We get the closest odd number", 3, controllers.getContainers().size());
    assertEquals("bar-controllers", controllers.getName());
    assertEquals("default01", controllers.getContainers().get(0).getHostName());
    assertEquals("default03", controllers.getContainers().get(1).getHostName());
    assertEquals("default08", controllers.getContainers().get(2).getHostName());
}
/**
 * A small content cluster requesting 5 non-dedicated controllers on a 5-host
 * system still produces a single-member controller cluster.
 */
@Test
public void testClusterControllersCanSupplementWithAllContainerClusters() throws ParseException {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <container version='1.0' id='foo1'>" +
            "     <nodes count='2'/>" +
            "  </container>" +
            "  <container version='1.0' id='foo2'>" +
            "     <nodes count='1'/>" +
            "  </container>" +
            "  <content version='1.0' id='bar'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <controllers><nodes dedicated='false' count='5'/></controllers>" +
            "     <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    int hostCount = 5; // 2 + 1 container hosts, 2 content hosts
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContentCluster contentCluster = vespaModel.getContentClusters().get("bar");
    ContainerCluster controllers = contentCluster.getClusterControllers();
    assertEquals(1, controllers.getContainers().size());
}
/**
 * Hosts default09, default06 and default03 are marked retired; the cluster
 * controllers must be placed on other (non-retired) hosts.
 */
@Test
public void testClusterControllersAreNotPlacedOnRetiredNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
// 10 container hosts + 9 content hosts = 19
int numberOfHosts = 19;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// Trailing arguments are the hostnames to mark as retired
VespaModel model = tester.createModel(services, true, "default09", "default06", "default03");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("Skipping retired default09", "default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals("Skipping retired default03", "default04", clusterControllers.getContainers().get(1).getHostName());
assertEquals("Skipping retired default06", "default08", clusterControllers.getContainers().get(2).getHostName());
}
/**
 * A retired host that runs a slobrok stays in the slobrok cluster: we get the
 * regular 3 slobroks on non-retired hosts plus the retired one appended last.
 */
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 10;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// "default09" is marked retired
VespaModel model = tester.createModel(services, true, "default09");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals("Includes retired node", 1+3, model.getAdmin().getSlobroks().size());
assertEquals("default01", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("default02", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("default10", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("Included in addition because it is retired", "default09", model.getAdmin().getSlobroks().get(3).getHostName());
}
/**
 * Same as the test above but with two retired hosts: both are appended after
 * the 3 regular slobroks, in ascending hostname order.
 */
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 10;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// "default09" and "default08" are marked retired
VespaModel model = tester.createModel(services, true, "default09", "default08");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals("Includes retired node", 3+2, model.getAdmin().getSlobroks().size());
assertEquals("default01", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("default02", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("default10", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("Included in addition because it is retired", "default08", model.getAdmin().getSlobroks().get(3).getHostName());
assertEquals("Included in addition because it is retired", "default09", model.getAdmin().getSlobroks().get(4).getHostName());
}
/**
 * With two container clusters, slobroks are spread over both, and retired
 * hosts remain slobrok members in addition to their replacements.
 */
@Test
public void testSlobroksAreSpreadOverAllContainerClusters() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <container version='1.0' id='bar'>" +
"     <nodes count='3'/>" +
"  </container>" +
"</services>";
// 10 + 3 container hosts
int numberOfHosts = 13;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// Three hosts are marked retired, one of them ("default12") in the "bar" cluster
VespaModel model = tester.createModel(services, true, "default12", "default03", "default02");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals("Includes retired node", 3+3, model.getAdmin().getSlobroks().size());
assertEquals("default04", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("default13", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("Included in addition because it is retired", "default12", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("default01", model.getAdmin().getSlobroks().get(3).getHostName());
assertEquals("Included in addition because it is retired", "default02", model.getAdmin().getSlobroks().get(4).getHostName());
assertEquals("Included in addition because it is retired", "default03", model.getAdmin().getSlobroks().get(5).getHostName());
}
/**
 * In the hosted-vespa routing application, slobroks are placed only on the
 * routing container cluster and never on the node-admin cluster.
 */
@Test
public void testSlobroksAreSpreadOverAllContainerClustersExceptNodeAdmin() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <container version='1.0' id='routing'>" +
            "     <nodes count='10'/>" +
            "  </container>" +
            "  <container version='1.0' id='node-admin'>" +
            "     <nodes count='3'/>" +
            "  </container>" +
            "</services>";
    int hostCount = 13; // 10 routing + 3 node-admin hosts
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    // Identify as the hosted-vespa routing application so node-admin is recognized
    modelTester.setApplicationId("hosted-vespa", "routing", "default");
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    Set<String> routingHosts = getClusterHostnames(vespaModel, "routing");
    Set<String> nodeAdminHosts = getClusterHostnames(vespaModel, "node-admin");
    assertEquals(10, routingHosts.size());
    assertEquals(3, nodeAdminHosts.size());
    Set<String> slobrokHosts = vespaModel.getAdmin().getSlobroks().stream()
                                         .map(AbstractService::getHostName)
                                         .collect(Collectors.toSet());
    assertEquals(3, slobrokHosts.size());
    // Every slobrok is on a routing host, and none on a node-admin host
    assertThat(slobrokHosts, everyItem(isIn(routingHosts)));
    assertThat(slobrokHosts, everyItem(not(isIn(nodeAdminHosts))));
}
/** Returns the names of all hosts running at least one service whose "clustername" property equals the given cluster id. */
private Set<String> getClusterHostnames(VespaModel model, String clusterId) {
    Optional<String> wantedCluster = Optional.of(clusterId);
    return model.getHosts().stream()
                .filter(hostInfo -> hostInfo.getServices().stream()
                                            .anyMatch(service -> Objects.equals(service.getProperty("clustername"),
                                                                                wantedCluster)))
                .map(HostInfo::getHostname)
                .collect(Collectors.toSet());
}
/** A content cluster of only 2 nodes gets a single cluster controller. */
@Test
public void test2ContentNodesProduces1ClusterController() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <content version='1.0' id='bar'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    int hostCount = 2;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    ContentCluster contentCluster = vespaModel.getContentClusters().get("bar");
    ContainerCluster controllers = contentCluster.getClusterControllers();
    assertEquals(1, controllers.getContainers().size());
}
/** A 2-node content cluster alongside a container cluster still gets a single cluster controller. */
@Test
public void test2ContentNodesWithContainerClusterProducesMixedClusterControllerCluster() throws ParseException {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <container version='1.0' id='foo'>" +
            "     <nodes count='3'/>" +
            "  </container>" +
            "  <content version='1.0' id='bar'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(5); // 3 container + 2 content hosts
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    ContentCluster contentCluster = vespaModel.getContentClusters().get("bar");
    ContainerCluster controllers = contentCluster.getClusterControllers();
    assertEquals(1, controllers.getContainers().size());
}
// NOTE(review): disabled test — the expectations below (3-member mixed controller
// clusters supplemented from the container cluster, without reusing a container
// already used by another content cluster) describe intended behavior that is
// not currently asserted to hold; confirm before re-enabling.
@Ignore
@Test
public void test2ContentNodesOn2ClustersWithContainerClusterProducesMixedClusterControllerCluster() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container version='1.0' id='container'>" +
"     <nodes count='3' flavor='container-node'/>" +
"  </container>" +
"  <content version='1.0' id='content1'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2' flavor='content1-node'/>" +
"  </content>" +
"  <content version='1.0' id='content2'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='2' flavor='content2-node'/>" +
"  </content>" +
"</services>";
// Distinct flavors so each cluster's hosts can be identified by name
VespaModelTester tester = new VespaModelTester();
tester.addHosts("container-node", 3);
tester.addHosts("content1-node", 2);
tester.addHosts("content2-node", 2);
VespaModel model = tester.createModel(services, true);
ContentCluster cluster1 = model.getContentClusters().get("content1");
ContainerCluster clusterControllers1 = cluster1.getClusterControllers();
assertEquals(1, clusterControllers1.getContainers().size());
assertEquals("content1-node0", clusterControllers1.getContainers().get(0).getHostName());
assertEquals("content1-node1", clusterControllers1.getContainers().get(1).getHostName());
assertEquals("container-node0", clusterControllers1.getContainers().get(2).getHostName());
ContentCluster cluster2 = model.getContentClusters().get("content2");
ContainerCluster clusterControllers2 = cluster2.getClusterControllers();
assertEquals(3, clusterControllers2.getContainers().size());
assertEquals("content2-node0", clusterControllers2.getContainers().get(0).getHostName());
assertEquals("content2-node1", clusterControllers2.getContainers().get(1).getHostName());
assertEquals("We do not pick the container used to supplement another cluster",
"container-node1", clusterControllers2.getContainers().get(2).getHostName());
}
/**
 * Requesting 4 dedicated cluster controllers yields exactly 4, each on its own
 * host separate from container and content nodes.
 */
@Test
public void testExplicitDedicatedClusterControllers() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container version='1.0' id='foo'>" +
"     <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>2</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes dedicated='true' count='4'/></controllers>" +
"     <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
// 10 container + 9 content + 4 dedicated controller hosts = 23
int numberOfHosts = 23;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(4, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
// Controllers are assigned hosts in descending name order
assertEquals("default04", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default03", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default02", clusterControllers.getContainers().get(2).getHostName());
assertEquals("default01", clusterControllers.getContainers().get(3).getHostName());
}
/**
 * Requesting 24 nodes in 3 groups when only 6 hosts exist (and validation is
 * off): the topology is scaled down to 3 groups of 2 nodes and the redundancy
 * settings are reduced to fit.
 */
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='3'>4</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24' groups='3'/>" +
"     <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
"  </content>" +
"</services>";
int numberOfHosts = 6;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// failOnOutOfCapacity=false: the model is built with whatever hosts are available
VespaModel model = tester.createModel(services, false);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
// Effective redundancy = 2 copies per group * 3 groups
assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(2*3, cluster.redundancy().effectiveReadyCopies());
assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(3, cluster.getRootGroup().getSubgroups().size());
// Each of the 3 groups gets 2 nodes with consecutive distribution keys
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/2"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/3"));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getIndex(), is("2"));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().size(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getDistributionKey(), is(4));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getConfigId(), is("bar/storage/4"));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getDistributionKey(), is(5));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getConfigId(), is("bar/storage/5"));
}
/**
 * Requesting 24 flat nodes when only 4 hosts exist (validation off): the
 * cluster is scaled down to 4 nodes and redundancy, ready copies and dispatch
 * groups are all capped at 4.
 */
@Test
public void testUsingNodesCountAttributesAndGettingTooFewNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='8'>12</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24'/>" +
"     <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
"     <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
"  </content>" +
"</services>";
int numberOfHosts = 4;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// failOnOutOfCapacity=false: build with available hosts instead of failing
VespaModel model = tester.createModel(services, false);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(4, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(4, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(4, cluster.redundancy().effectiveReadyCopies());
assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
// Flat topology: all nodes directly in the root group, no partitions
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(4, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getNodes().size(), is(4));
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertThat(cluster.getRootGroup().getNodes().get(1).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getNodes().get(1).getConfigId(), is("bar/storage/1"));
assertThat(cluster.getRootGroup().getNodes().get(2).getDistributionKey(), is(2));
assertThat(cluster.getRootGroup().getNodes().get(2).getConfigId(), is("bar/storage/2"));
assertThat(cluster.getRootGroup().getNodes().get(3).getDistributionKey(), is(3));
assertThat(cluster.getRootGroup().getNodes().get(3).getConfigId(), is("bar/storage/3"));
}
/**
 * Requesting 24 nodes in 3 groups when only a single host exists (validation
 * off): the cluster collapses to one node, redundancy 1 and one controller.
 */
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='3'>4</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24' groups='3'/>" +
"     <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
"  </content>" +
"</services>";
int numberOfHosts = 1;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// failOnOutOfCapacity=false: build with the single available host
VespaModel model = tester.createModel(services, false);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(1, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(1, cluster.redundancy().effectiveReadyCopies());
// Groups are dropped entirely: one node directly in the root group
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(1, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0"));
}
/** With required='true', asking for more nodes than exist must fail instead of scaling down. */
@Test(expected = IllegalArgumentException.class)
public void testRequiringMoreNodesThanAreAvailable() throws ParseException {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <content version='1.0' id='bar'>" +
            "     <redundancy>1</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='3' required='true'/>" +
            "  </content>" +
            "</services>";
    int hostCount = 2; // one fewer than required
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    modelTester.createModel(servicesXml, false); // expected to throw
}
/**
 * Requesting 24 flat nodes when only a single host exists (validation off):
 * everything — redundancy, ready copies, dispatch groups — collapses to 1.
 */
@Test
public void testUsingNodesCountAttributesAndGettingJustOneNode() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='8'>12</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24'/>" +
"     <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
"     <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
"  </content>" +
"</services>";
int numberOfHosts = 1;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// failOnOutOfCapacity=false: build with the single available host
VespaModel model = tester.createModel(services, false);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(1, cluster.redundancy().effectiveReadyCopies());
assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(1, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0"));
}
/**
 * Every service type (logserver, slobrok, container, controllers, content)
 * can request its own flavor; all requested hosts of each flavor are used.
 */
@Test
public void testRequestingSpecificFlavors() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'>" +
"    <logservers><nodes count='1' dedicated='true' flavor='logserver-flavor'/></logservers>" +
"    <slobroks><nodes count='2' dedicated='true' flavor='slobrok-flavor'/></slobroks>" +
"  </admin>" +
"  <container version='1.0' id='container'>" +
"     <nodes count='4' flavor='container-flavor'/>" +
"  </container>" +
"  <content version='1.0' id='foo'>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes count='2' dedicated='true' flavor='controller-foo-flavor'/></controllers>" +
"     <nodes count='5' flavor='content-foo-flavor'/>" +
"  </content>" +
"  <content version='1.0' id='bar'>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <controllers><nodes count='3' dedicated='true' flavor='controller-bar-flavor'/></controllers>" +
"     <nodes count='6' flavor='content-bar-flavor'/>" +
"  </content>" +
"</services>";
// 1 + 2 + 4 + 2 + 5 + 3 + 6 = 23 hosts, one pool per flavor
int totalHosts = 23;
VespaModelTester tester = new VespaModelTester();
tester.addHosts("logserver-flavor", 1);
tester.addHosts("slobrok-flavor", 2);
tester.addHosts("container-flavor", 4);
tester.addHosts("controller-foo-flavor", 2);
tester.addHosts("content-foo-flavor", 5);
tester.addHosts("controller-bar-flavor", 3);
tester.addHosts("content-bar-flavor", 6);
// Third argument 0 = no retired hosts; every supplied host must be used
VespaModel model = tester.createModel(services, true, 0);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(totalHosts));
}
/** A services file containing only a jdisc cluster gets 3 containers, a logserver and 3 slobroks. */
@Test
public void testJDiscOnly() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<jdisc version='1.0'>" +
            "  <search/>" +
            "  <nodes count='3'/>" +
            "</jdisc>";
    int hostCount = 3;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(hostCount);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(3, vespaModel.getContainerClusters().get("jdisc").getContainers().size());
    assertNotNull(vespaModel.getAdmin().getLogserver());
    assertEquals(3, vespaModel.getAdmin().getSlobroks().size());
}
/** Host aliases in services.xml also work when hosts come from a provisioner. */
@Test
public void testUsingHostaliasWithProvisioner() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "<admin version='2.0'>" +
            "  <adminserver hostalias='node1'/>\n"+
            "</admin>\n" +
            "<jdisc id='mydisc' version='1.0'>" +
            "  <handler id='myHandler'>" +
            "    <component id='injected' />" +
            "  </handler>" +
            "  <nodes>" +
            "    <node hostalias='node1'/>" +
            "  </nodes>" +
            "</jdisc>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
}
/** Standalone-container syntax (a bare top-level jdisc element) is accepted on hosted Vespa. */
@Test
public void testThatStandaloneSyntaxWorksOnHostedVespa() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<jdisc id='foo' version='1.0'>" +
            "  <http>" +
            "    <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" +
            "  </http>" +
            "</jdisc>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getHosts().size());
    assertEquals(1, vespaModel.getContainerClusters().size());
}
/** Omitting the nodes tag in both clusters gives one node per cluster. */
@Test
public void testNoNodeTagMeans1Node() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <jdisc id='foo' version='1.0'>" +
            "    <search/>" +
            "    <document-api/>" +
            "  </jdisc>" +
            "  <content version='1.0' id='bar'>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
    assertEquals(1, vespaModel.getContainerClusters().get("foo").getContainers().size());
    assertEquals(1, vespaModel.getContentClusters().get("bar").getRootGroup().countNodes());
}
/** Omitting the nodes tag in a container-only setup gives one container node. */
@Test
public void testNoNodeTagMeans1NodeNoContent() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <jdisc id='foo' version='1.0'>" +
            "    <search/>" +
            "    <document-api/>" +
            "  </jdisc>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
    assertEquals(1, vespaModel.getContainerClusters().get("foo").getContainers().size());
}
/** Omitting the nodes tag also gives one node per cluster when not hosted. */
@Test
public void testNoNodeTagMeans1NodeNonHosted() {
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <jdisc id='foo' version='1.0'>" +
            "    <search/>" +
            "    <document-api/>" +
            "  </jdisc>" +
            "  <content version='1.0' id='bar'>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.setHosted(false); // non-hosted mode
    modelTester.addHosts(1);
    VespaModel vespaModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
    assertEquals(1, vespaModel.getContainerClusters().get("foo").getContainers().size());
    assertEquals(1, vespaModel.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size());
}
@Test
public void testSingleNodeNonHosted() {
    // Non-hosted model where both clusters explicitly place their single
    // node through a hostalias.
    String servicesXml =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <jdisc id='foo' version='1.0'>" +
            "    <search/>" +
            "    <document-api/>" +
            "    <nodes><node hostalias='foo'/></nodes>"+
            "  </jdisc>" +
            "  <content version='1.0' id='bar'>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.setHosted(false);
    modelTester.addHosts(1);
    VespaModel builtModel = modelTester.createModel(servicesXml, true);
    assertEquals(1, builtModel.getRoot().getHostSystem().getHosts().size());
    assertEquals(1, builtModel.getAdmin().getSlobroks().size());
    assertEquals(1, builtModel.getContainerClusters().get("foo").getContainers().size());
    assertEquals(1, builtModel.getContentClusters().get("bar").getRootGroup().countNodes());
}
/** Recreate the combination used in some factory tests */
@Test
public void testMultitenantButNotHosted() {
    // Multitenant but non-hosted, non-provisioned deployment: everything is
    // mapped onto a single host ('node1'), with two content nodes on it.
    String services =
            "<?xml version='1.0' encoding='UTF-8' ?>" +
            "<services version='1.0'>" +
            "  <admin version='2.0'>" +
            "    <adminserver hostalias='node1'/>" +
            "  </admin>" +
            "  <jdisc id='default' version='1.0'>" +
            "    <search/>" +
            "    <nodes>" +
            "      <node hostalias='node1'/>" +
            "    </nodes>" +
            "  </jdisc>" +
            "  <content id='storage' version='1.0'>" +
            "    <redundancy>2</redundancy>" +
            "    <group>" +
            "      <node distribution-key='0' hostalias='node1'/>" +
            "      <node distribution-key='1' hostalias='node1'/>" +
            "    </group>" +
            "    <tuning>" +
            "      <cluster-controller>" +
            "        <transition-time>0</transition-time>" +
            "      </cluster-controller>" +
            "    </tuning>" +
            "    <documents>" +
            "      <document mode='store-only' type='type1'/>" +
            "    </documents>" +
            "    <engine>" +
            "      <proton/>" +
            "    </engine>" +
            "  </content>" +
            "  </services>";
    VespaModel model = createNonProvisionedMultitenantModel(services);
    // One physical host carries both content nodes and the cluster controller.
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(1));
    ContentCluster content = model.getContentClusters().get("storage");
    assertEquals(2, content.getRootGroup().getNodes().size());
    ContainerCluster controller = content.getClusterControllers();
    assertEquals(1, controller.getContainers().size());
}
/**
 * As testMultitenantButNotHosted, but with an additional 'search' content
 * cluster sharing the single host with the 'storage' cluster.
 * Fix: the original carried a duplicated {@code @Test} annotation, which does
 * not compile — {@code org.junit.Test} is not a repeatable annotation.
 */
@Test
public void testMultitenantButNotHostedSharedContentNode() {
    String services =
            "<?xml version='1.0' encoding='UTF-8' ?>" +
            "<services version='1.0'>" +
            "  <admin version='2.0'>" +
            "    <adminserver hostalias='node1'/>" +
            "  </admin>" +
            "  <jdisc id='default' version='1.0'>" +
            "    <search/>" +
            "    <nodes>" +
            "      <node hostalias='node1'/>" +
            "    </nodes>" +
            "  </jdisc>" +
            "  <content id='storage' version='1.0'>" +
            "    <redundancy>2</redundancy>" +
            "    <group>" +
            "      <node distribution-key='0' hostalias='node1'/>" +
            "      <node distribution-key='1' hostalias='node1'/>" +
            "    </group>" +
            "    <tuning>" +
            "      <cluster-controller>" +
            "        <transition-time>0</transition-time>" +
            "      </cluster-controller>" +
            "    </tuning>" +
            "    <documents>" +
            "      <document mode='store-only' type='type1'/>" +
            "    </documents>" +
            "    <engine>" +
            "      <proton/>" +
            "    </engine>" +
            "  </content>" +
            "  <content id='search' version='1.0'>" +
            "    <redundancy>2</redundancy>" +
            "    <group>" +
            "      <node distribution-key='0' hostalias='node1'/>" +
            "    </group>" +
            "    <documents>" +
            "      <document type='type1'/>" +
            "    </documents>" +
            "  </content>" +
            "  </services>";
    VespaModel model = createNonProvisionedMultitenantModel(services);
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(1));
    ContentCluster content = model.getContentClusters().get("storage");
    assertEquals(2, content.getRootGroup().getNodes().size());
    ContainerCluster controller = content.getClusterControllers();
    assertEquals(1, controller.getContainers().size());
    // NOTE(review): the added 'search' cluster is only exercised implicitly by
    // model building; consider asserting on getContentClusters().get("search") too.
}
/** Builds a multitenant model from the given services XML, with no hosts file and no provisioning. */
private VespaModel createNonProvisionedMultitenantModel(String services) {
    boolean multitenant = true;
    return createNonProvisionedModel(multitenant, null, services);
}
/**
 * Builds a model from the given hosts/services XML using a mock application
 * package (with a generated 'type1' search definition), without provisioning.
 */
private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) {
    VespaModelCreatorWithMockPkg creator =
            new VespaModelCreatorWithMockPkg(hosts, services, ApplicationPackageUtils.generateSearchDefinition("type1"));
    DeployState state = new DeployState.Builder()
            .applicationPackage(creator.appPkg)
            .properties(new DeployProperties.Builder().multitenant(multitenant).build())
            .build(true);
    return creator.create(false, state);
}
@Test
public void testThatTldConfigIdsAreDeterministic() {
    // Two container clusters and two content clusters: the TLD (top-level
    // dispatcher) config ids must line up deterministically with the container
    // running on the same host, for equivalent deployments.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <jdisc version='1.0' id='jdisc0'>" +
            "    <search/>" +
            "    <nodes count='2'/>" +
            "  </jdisc>" +
            "  <jdisc version='1.0' id='jdisc1'>" +
            "    <search/>" +
            "    <nodes count='2'/>" +
            "  </jdisc>" +
            "  <content version='1.0' id='content0'>" +
            "    <redundancy>2</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='2'/>" +
            "  </content>" +
            "  <content version='1.0' id='content1'>" +
            "    <redundancy>2</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    int numberOfHosts = 8;
    {
        // First deployment: exactly as many hosts as needed; container config
        // ids start at index 0.
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts);
        VespaModel model = tester.createModel(services, true);
        assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
        Map<String, ContentCluster> contentClusters = model.getContentClusters();
        assertEquals(2, contentClusters.size());
        checkThatTldAndContainerRunningOnSameHostHaveSameId(
                model.getContainerClusters().values(),
                model.getContentClusters().values(),
                0);
    }
    {
        // Second deployment: one extra host available and host "default0"
        // excluded; container config ids then start at index 1.
        // NOTE(review): the exact semantics of createModel(services, true, 1,
        // "default0") are assumed from the arguments — confirm in VespaModelTester.
        VespaModelTester tester = new VespaModelTester();
        tester.addHosts(numberOfHosts + 1);
        VespaModel model = tester.createModel(services, true, 1, "default0");
        assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
        Map<String, ContentCluster> contentClusters = model.getContentClusters();
        assertEquals(2, contentClusters.size());
        checkThatTldAndContainerRunningOnSameHostHaveSameId(
                model.getContainerClusters().values(),
                model.getContentClusters().values(),
                1);
    }
}
// Verifies that each TLD and the container that runs on the same host carry
// the same index in their config ids. Assumes every container cluster has two
// containers, and that the content cluster's TLD list is ordered by container
// cluster (hence the 2 * i + j indexing).
// NOTE(review): the TLD ordering assumption is inferred from the indexing —
// confirm against the indexed search cluster's TLD construction.
private void checkThatTldAndContainerRunningOnSameHostHaveSameId(Collection<ContainerCluster> containerClusters,
                                                                 Collection<ContentCluster> contentClusters,
                                                                 int startIndexForContainerIds) {
    for (ContentCluster contentCluster : contentClusters) {
        String contentClusterName = contentCluster.getName();
        int i = 0;  // index of the container cluster currently being checked
        for (ContainerCluster containerCluster : containerClusters) {
            String containerClusterName = containerCluster.getName();
            for (int j = 0; j < 2; j++) {
                Dispatch tld = contentCluster.getSearch().getIndexed().getTLDs().get(2 * i + j);
                Container container = containerCluster.getContainers().get(j);
                int containerConfigIdIndex = j + startIndexForContainerIds;
                // Same host, and matching indices in both config ids.
                assertEquals(container.getHostName(), tld.getHostname());
                assertEquals(contentClusterName + "/search/cluster." + contentClusterName + "/tlds/" +
                             containerClusterName + "." + containerConfigIdIndex + ".tld." + containerConfigIdIndex,
                             tld.getConfigId());
                assertEquals(containerClusterName + "/" + "container." + containerConfigIdIndex,
                             container.getConfigId());
            }
            i++;
        }
    }
}
/** Returns the cluster's configured JVM heap size as a percentage of physical memory. */
private int physicalMemoryPercentage(ContainerCluster cluster) {
    QrStartConfig.Builder builder = new QrStartConfig.Builder();
    cluster.getSearch().getConfig(builder);
    QrStartConfig config = new QrStartConfig(builder);
    return config.jvm().heapSizeAsPercentageOfPhysicalMemory();
}
@Test
public void require_that_proton_config_is_tuned_based_on_node_flavor() {
    // Nodes on a slow-disk flavor should get proton disk write speed tuned to 40.
    String servicesXml = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
            "<services>",
            "  <content version='1.0' id='test'>",
            "    <documents>",
            "      <document type='type1' mode='index'/>",
            "    </documents>",
            "    <nodes count='2' flavor='content-test-flavor'/>",
            "  </content>",
            "</services>");
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(createFlavorFromDiskSetting("content-test-flavor", false), 2);
    VespaModel builtModel = modelTester.createModel(servicesXml, true, 0);
    ContentSearchCluster searchCluster = builtModel.getContentClusters().get("test").getSearch();
    assertEquals(2, searchCluster.getSearchNodes().size());
    for (int nodeIdx = 0; nodeIdx < 2; nodeIdx++)
        assertEquals(40, getProtonConfig(searchCluster, nodeIdx).hwinfo().disk().writespeed(), 0.001);
}
/** Creates a node flavor with the given name and fast-disk flag. */
private static Flavor createFlavorFromDiskSetting(String name, boolean fastDisk) {
    FlavorsConfig.Flavor.Builder builder = new FlavorsConfig.Flavor.Builder().name(name).fastDisk(fastDisk);
    return new Flavor(new FlavorsConfig.Flavor(builder));
}
/** Returns the proton config produced by the given search node of the cluster. */
private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) {
    List<SearchNode> nodes = cluster.getSearchNodes();
    assertTrue(searchNodeIdx < nodes.size());
    ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder();
    nodes.get(searchNodeIdx).getConfig(protonBuilder);
    return new ProtonConfig(protonBuilder);
}
@Test
public void require_that_config_override_and_explicit_proton_tuning_have_precedence_over_default_node_flavor_tuning() {
    // Precedence check: an explicit <config> override (maxtlssize) and explicit
    // <tuning> (maxmemorygain) must both win over the defaults derived from the
    // node flavor; untouched values keep the flavor-derived defaults.
    String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
            "<services>",
            "  <content version='1.0' id='test'>",
            "    <config name='vespa.config.search.core.proton'>",
            "      <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>",
            "    </config>",
            "    <documents>",
            "      <document type='type1' mode='index'/>",
            "    </documents>",
            "    <nodes count='1' flavor='content-test-flavor'/>",
            "    <engine>",
            "      <proton>",
            "        <tuning>",
            "          <searchnode>",
            "            <flushstrategy>",
            "              <native>",
            "                <total>",
            "                  <maxmemorygain>1000</maxmemorygain>",
            "                </total>",
            "              </native>",
            "            </flushstrategy>",
            "          </searchnode>",
            "        </tuning>",
            "      </proton>",
            "    </engine>",
            "  </content>",
            "</services>");
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts("default", 1);
    tester.addHosts(createFlavorFromMemoryAndDisk("content-test-flavor", 128, 100), 1);
    VespaModel model = tester.createModel(services, true, 0);
    ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
    ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId());
    assertEquals(2000, cfg.flush().memory().maxtlssize()); // from config override
    assertEquals(1000, cfg.flush().memory().maxmemory());  // from explicit tuning
    // 16 GB — presumably derived from the 128 GB flavor memory; confirm the
    // derivation rule in the proton flavor-tuning code.
    assertEquals((long) 16 * GB, cfg.flush().memory().each().maxmemory());
}
/** One gigabyte in bytes. Made {@code final}: this is a constant and should not be reassigned. */
private static final long GB = 1024 * 1024 * 1024;
/** Creates a node flavor with the given name and minimum memory/disk sizes (in GB). */
private static Flavor createFlavorFromMemoryAndDisk(String name, int memoryGb, int diskGb) {
    FlavorsConfig.Flavor.Builder builder = new FlavorsConfig.Flavor.Builder()
            .name(name)
            .minMainMemoryAvailableGb(memoryGb)
            .minDiskAvailableGb(diskGb);
    return new Flavor(new FlavorsConfig.Flavor(builder));
}
/** Returns the proton config the model produces for the given config id. */
private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
    ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder();
    model.getConfig(protonBuilder, configId);
    return new ProtonConfig(protonBuilder);
}
} | class ModelProvisioningTest {
@Test
public void testNodeCountForJdisc() {
    // Two jdisc clusters with explicit node counts; 'mydisc2' additionally
    // overrides allocated memory, jvm args and preload. Verifies container
    // counts, config ids, jvm args, preload library and memory percentage.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>\n" +
            "\n" +
            "<admin version='3.0'><nodes count='1' /></admin>\n" +
            "<jdisc id='mydisc' version='1.0'>" +
            "  <handler id='myHandler'>" +
            "    <component id='injected' />" +
            "  </handler>" +
            "  <nodes count=\"3\"/>" +
            "</jdisc>" +
            "<jdisc id='mydisc2' version='1.0'>" +
            "  <document-processing/>" +
            "  <handler id='myHandler'>" +
            "    <component id='injected' />" +
            "  </handler>" +
            "  <nodes count='2' allocated-memory='45%' jvmargs='-verbosegc' preload='lib/blablamalloc.so'/>" +
            "</jdisc>" +
            "</services>";
    String hosts = "<hosts>"
            + " <host name='myhost0'>"
            + " <alias>node0</alias>"
            + " </host>"
            + " <host name='myhost1'>"
            + " <alias>node1</alias>"
            + " </host>"
            + " <host name='myhost2'>"
            + " <alias>node2</alias>"
            + " </host>"
            + " <host name='myhost3'>"
            + " <alias>node3</alias>"
            + " </host>"
            + " <host name='myhost4'>"
            + " <alias>node4</alias>"
            + " </host>"
            + " <host name='myhost5'>"
            + " <alias>node5</alias>"
            + " </host>"
            + "</hosts>";
    // The hosts XML is fed to an InMemoryProvisioner rather than to the creator.
    VespaModelCreatorWithMockPkg creator = new VespaModelCreatorWithMockPkg(null, services);
    VespaModel model = creator.create(new DeployState.Builder().modelHostProvisioner(new InMemoryProvisioner(Hosts.readFrom(new StringReader(hosts)), true)));
    assertThat(model.getContainerClusters().get("mydisc").getContainers().size(), is(3));
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getConfigId(), is("mydisc/container.0"));
    assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(0).isInitialized());
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getConfigId(), is("mydisc/container.1"));
    assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(1).isInitialized());
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getConfigId(), is("mydisc/container.2"));
    assertTrue(model.getContainerClusters().get("mydisc").getContainers().get(2).isInitialized());
    assertThat(model.getContainerClusters().get("mydisc2").getContainers().size(), is(2));
    assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getConfigId(), is("mydisc2/container.0"));
    assertTrue(model.getContainerClusters().get("mydisc2").getContainers().get(0).isInitialized());
    assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getConfigId(), is("mydisc2/container.1"));
    assertTrue(model.getContainerClusters().get("mydisc2").getContainers().get(1).isInitialized());
    // 'mydisc' has no overrides: empty jvm args, default malloc preload, no memory percentage.
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getJvmArgs(), is(""));
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getJvmArgs(), is(""));
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getJvmArgs(), is(""));
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
    assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
    assertThat(model.getContainerClusters().get("mydisc").getMemoryPercentage(), is(Optional.empty()));
    // 'mydisc2' overrides all three via the <nodes> attributes.
    assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getJvmArgs(), is("-verbosegc"));
    assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getJvmArgs(), is("-verbosegc"));
    assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getPreLoad(), is("lib/blablamalloc.so"));
    assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(1).getPreLoad(), is("lib/blablamalloc.so"));
    assertThat(model.getContainerClusters().get("mydisc2").getMemoryPercentage(), is(Optional.of(45)));
    HostSystem hostSystem = model.getHostSystem();
    assertNotNull(hostSystem.getHostByHostname("myhost0"));
    assertNotNull(hostSystem.getHostByHostname("myhost1"));
    assertNotNull(hostSystem.getHostByHostname("myhost2"));
    assertNotNull(hostSystem.getHostByHostname("myhost3"));
    assertNull(hostSystem.getHostByHostname("Nope"));
}
@Test
public void testNodeCountForContentGroup() {
    // A content cluster with <nodes count='2'>: the root group gets exactly
    // two storage nodes, with distribution keys assigned in order from 0.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "\n" +
            "  <admin version='3.0'>" +
            "    <nodes count='3'/>" +
            "  </admin>" +
            "  <content version='1.0' id='bar'>" +
            "    <redundancy>2</redundancy>" +
            "    <documents>" +
            "      <document type='type1' mode='index'/>" +
            "    </documents>" +
            "    <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    int expectedHosts = 2;
    int expectedContentNodes = 2;
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(expectedHosts);
    VespaModel builtModel = modelTester.createModel(xmlWithNodes, true);
    assertEquals(expectedHosts, builtModel.getRoot().getHostSystem().getHosts().size());
    Map<String, ContentCluster> contentClusters = builtModel.getContentClusters();
    ContentCluster barCluster = contentClusters.get("bar");
    assertEquals(expectedContentNodes, barCluster.getRootGroup().getNodes().size());
    int expectedKey = 0;
    for (StorageNode storageNode : barCluster.getRootGroup().getNodes())
        assertEquals(expectedKey++, storageNode.getDistributionKey());
}
@Test
public void testSeparateClusters() {
    // Container and content clusters declared separately each get their own
    // nodes, and the container keeps the default (non-combined) heap percentage.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "     <search/>" +
            "     <nodes count='1'/>" +
            "  </container>" +
            "  <content version='1.0' id='content1'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(3);
    VespaModel builtModel = modelTester.createModel(xmlWithNodes, true);
    assertEquals("Nodes in content1", 2, builtModel.getContentClusters().get("content1").getRootGroup().getNodes().size());
    assertEquals("Nodes in container1", 1, builtModel.getContainerClusters().get("container1").getContainers().size());
    assertEquals("Heap size for container", 60, physicalMemoryPercentage(builtModel.getContainerClusters().get("container1")));
}
@Test
public void testClusterMembership() {
    // Every provisioned host records the cluster(s) it is a member of.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "     <nodes count='1'/>" +
            "  </container>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(1);
    VespaModel builtModel = modelTester.createModel(xmlWithNodes, true);
    assertEquals(1, builtModel.getHostSystem().getHosts().size());
    HostResource onlyHost = builtModel.getHostSystem().getHosts().iterator().next();
    assertEquals(1, onlyHost.clusterMemberships().size());
    ClusterMembership membership = onlyHost.clusterMemberships().iterator().next();
    assertEquals("container", membership.cluster().type().name());
    assertEquals("container1", membership.cluster().id().value());
}
@Test
public void testCombinedCluster() {
    // <nodes of='content1'> places the container cluster on the content
    // cluster's nodes; the container heap percentage is lowered accordingly.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "     <search/>" +
            "     <nodes of='content1'/>" +
            "  </container>" +
            "  <content version='1.0' id='content1'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(2);
    VespaModel builtModel = modelTester.createModel(xmlWithNodes, true);
    assertEquals("Nodes in content1", 2, builtModel.getContentClusters().get("content1").getRootGroup().getNodes().size());
    assertEquals("Nodes in container1", 2, builtModel.getContainerClusters().get("container1").getContainers().size());
    assertEquals("Heap size is lowered with combined clusters",
                 17, physicalMemoryPercentage(builtModel.getContainerClusters().get("container1")));
}
@Test
public void testCombinedClusterWithJvmArgs() {
    // In a combined cluster, jvmargs from the container's <nodes> tag must
    // reach every container placed on the content nodes.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "     <document-processing/>" +
            "     <nodes of='content1' jvmargs='testarg'/>" +
            "  </container>" +
            "  <content version='1.0' id='content1'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='2'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(2);
    VespaModel builtModel = modelTester.createModel(xmlWithNodes, true);
    assertEquals("Nodes in content1", 2, builtModel.getContentClusters().get("content1").getRootGroup().getNodes().size());
    assertEquals("Nodes in container1", 2, builtModel.getContainerClusters().get("container1").getContainers().size());
    for (Container eachContainer : builtModel.getContainerClusters().get("container1").getContainers())
        assertTrue(eachContainer.getJvmArgs().contains("testarg"));
}
@Test
public void testMultipleCombinedClusters() {
    // Two independent combined container/content pairs: each container cluster
    // is sized by the content cluster it is placed on.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "     <nodes of='content1'/>" +
            "  </container>" +
            "  <container version='1.0' id='container2'>" +
            "     <nodes of='content2'/>" +
            "  </container>" +
            "  <content version='1.0' id='content1'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='2'/>" +
            "  </content>" +
            "  <content version='1.0' id='content2'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='3'/>" +
            "  </content>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(5);
    VespaModel builtModel = modelTester.createModel(xmlWithNodes, true);
    assertEquals("Nodes in content1", 2, builtModel.getContentClusters().get("content1").getRootGroup().getNodes().size());
    assertEquals("Nodes in container1", 2, builtModel.getContainerClusters().get("container1").getContainers().size());
    assertEquals("Nodes in content2", 3, builtModel.getContentClusters().get("content2").getRootGroup().getNodes().size());
    assertEquals("Nodes in container2", 3, builtModel.getContainerClusters().get("container2").getContainers().size());
}
@Test
public void testNonExistingCombinedClusterReference() {
    // <nodes of='...'> referring to a service that is not declared must be rejected.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "     <nodes of='container2'/>" +
            "  </container>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(2);
    try {
        modelTester.createModel(xmlWithNodes, true);
        fail("Expected exception");
    }
    catch (IllegalArgumentException expected) {
        assertEquals("container cluster 'container1' references service 'container2' but this service is not defined", expected.getMessage());
    }
}
@Test
public void testInvalidCombinedClusterReference() {
    // <nodes of='...'> may only reference a content cluster; referencing
    // another container cluster must be rejected.
    String xmlWithNodes =
            "<?xml version='1.0' encoding='utf-8' ?>" +
            "<services>" +
            "  <container version='1.0' id='container1'>" +
            "     <nodes of='container2'/><!-- invalid; only content clusters can be referenced -->" +
            "  </container>" +
            "  <container version='1.0' id='container2'>" +
            "     <nodes count='2'/>" +
            "  </container>" +
            "</services>";
    VespaModelTester modelTester = new VespaModelTester();
    modelTester.addHosts(2);
    try {
        modelTester.createModel(xmlWithNodes, true);
        fail("Expected exception");
    }
    catch (IllegalArgumentException expected) {
        assertEquals("container cluster 'container1' references service 'container2', but that is not a content service", expected.getMessage());
    }
}
@Test
public void testUsingNodesAndGroupCountAttributes() {
    // Grouped content clusters on 64 hosts: 'bar' uses 27 nodes in 9 groups of
    // 3, 'baz' 27 nodes in 27 groups of 1. Verifies exact host allocation,
    // group structure, distribution keys and cluster-controller placement.
    // NOTE(review): the literal 'defaultNN' host names encode the allocator's
    // deterministic assignment order and will shift if that ordering changes.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <container version='1.0' id='foo'>" +
            "     <nodes count='10'/>" +
            "  </container>" +
            "  <content version='1.0' id='bar'>" +
            "     <redundancy>2</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='27' groups='9'/>" +
            "  </content>" +
            "  <content version='1.0' id='baz'>" +
            "     <redundancy>1</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='27' groups='27'/>" +
            "  </content>" +
            "</services>";
    int numberOfHosts = 64;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, true);
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
    assertEquals(1, model.getContainerClusters().size());
    Set<com.yahoo.vespa.model.Host> containerHosts = model.getContainerClusters().get("foo").getContainers().stream().map(Container::getHost).collect(Collectors.toSet());
    assertEquals(10, containerHosts.size());
    // Admin services are co-located on container nodes in a hosted setting.
    Admin admin = model.getAdmin();
    Set<com.yahoo.vespa.model.Host> slobrokHosts = admin.getSlobroks().stream().map(Slobrok::getHost).collect(Collectors.toSet());
    assertEquals(3, slobrokHosts.size());
    assertTrue("Slobroks are assigned from container nodes", containerHosts.containsAll(slobrokHosts));
    assertTrue("Logserver is assigned from container nodes", containerHosts.contains(admin.getLogserver().getHost()));
    assertEquals("No in-cluster config servers in a hosted environment", 0, admin.getConfigservers().size());
    assertEquals("No admin cluster controller when multitenant", null, admin.getClusterControllers());
    // 'bar': 9 groups of 3 nodes, no nodes directly under the root group.
    ContentCluster cluster = model.getContentClusters().get("bar");
    ContainerCluster clusterControllers = cluster.getClusterControllers();
    assertEquals(3, clusterControllers.getContainers().size());
    assertEquals("bar-controllers", clusterControllers.getName());
    assertEquals("default28", clusterControllers.getContainers().get(0).getHostName());
    assertEquals("default31", clusterControllers.getContainers().get(1).getHostName());
    assertEquals("default54", clusterControllers.getContainers().get(2).getHostName());
    assertEquals(0, cluster.getRootGroup().getNodes().size());
    assertEquals(9, cluster.getRootGroup().getSubgroups().size());
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(3));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
    assertEquals("default54", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getDistributionKey(), is(2));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(2).getConfigId(), is("bar/storage/2"));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(3));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(3));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/3"));
    assertEquals("default51", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(4));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/4"));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getDistributionKey(), is(5));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(2).getConfigId(), is("bar/storage/5"));
    assertEquals("default48", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
    assertThat(cluster.getRootGroup().getSubgroups().get(8).getIndex(), is("8"));
    assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().size(), is(3));
    assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getDistributionKey(), is(24));
    assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(0).getConfigId(), is("bar/storage/24"));
    assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getDistributionKey(), is(25));
    assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(1).getConfigId(), is("bar/storage/25"));
    assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getDistributionKey(), is(26));
    assertThat(cluster.getRootGroup().getSubgroups().get(8).getNodes().get(2).getConfigId(), is("bar/storage/26"));
    // 'baz': 27 groups of a single node each.
    cluster = model.getContentClusters().get("baz");
    clusterControllers = cluster.getClusterControllers();
    assertEquals(3, clusterControllers.getContainers().size());
    assertEquals("baz-controllers", clusterControllers.getName());
    assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
    assertEquals("default02", clusterControllers.getContainers().get(1).getHostName());
    assertEquals("default27", clusterControllers.getContainers().get(2).getHostName());
    assertEquals(0, cluster.getRootGroup().getNodes().size());
    assertEquals(27, cluster.getRootGroup().getSubgroups().size());
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("baz/storage/0"));
    assertEquals("default27", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("baz/storage/1"));
    assertEquals("default26", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
    assertEquals("default25", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
    assertThat(cluster.getRootGroup().getSubgroups().get(26).getIndex(), is("26"));
    assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getDistributionKey(), is(26));
    assertThat(cluster.getRootGroup().getSubgroups().get(26).getNodes().get(0).getConfigId(), is("baz/storage/26"));
}
@Test
public void testGroupsOfSize1() {
    // 8 groups of a single node each: group indices and distribution keys run
    // 0..7, and distribution bits grow to 8.
    // NOTE(review): the literal 'defaultNN' host names encode the allocator's
    // deterministic (descending) assignment order.
    String services =
            "<?xml version='1.0' encoding='utf-8' ?>\n" +
            "<services>" +
            "  <admin version='4.0'/>" +
            "  <container version='1.0' id='foo'>" +
            "     <nodes count='10'/>" +
            "  </container>" +
            "  <content version='1.0' id='bar'>" +
            "     <redundancy>1</redundancy>" +
            "     <documents>" +
            "       <document type='type1' mode='index'/>" +
            "     </documents>" +
            "     <nodes count='8' groups='8'/>" +
            "  </content>" +
            "</services>";
    int numberOfHosts = 18;
    VespaModelTester tester = new VespaModelTester();
    tester.addHosts(numberOfHosts);
    VespaModel model = tester.createModel(services, true);
    assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
    ContentCluster cluster = model.getContentClusters().get("bar");
    ContainerCluster clusterControllers = cluster.getClusterControllers();
    assertEquals(3, clusterControllers.getContainers().size());
    assertEquals("bar-controllers", clusterControllers.getName());
    assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
    assertEquals("default02", clusterControllers.getContainers().get(1).getHostName());
    assertEquals("default08", clusterControllers.getContainers().get(2).getHostName());
    assertEquals(0, cluster.getRootGroup().getNodes().size());
    assertEquals(8, cluster.getRootGroup().getSubgroups().size());
    assertEquals(8, cluster.distributionBits());
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
    assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
    assertEquals("default08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/1"));
    assertEquals("default07", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
    assertThat(cluster.getRootGroup().getSubgroups().get(7).getIndex(), is("7"));
    assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().size(), is(1));
    assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getDistributionKey(), is(7));
    assertThat(cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getConfigId(), is("bar/storage/7"));
    assertEquals("default01", cluster.getRootGroup().getSubgroups().get(7).getNodes().get(0).getHostName());
}
// With <nodes dedicated='false'>, cluster controllers are co-located on content nodes.
// The requested count of 6 is adjusted to an odd number (5) to keep quorum well-defined.
@Test
public void testExplicitNonDedicatedClusterControllers() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"    <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"    <redundancy>2</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <controllers><nodes dedicated='false' count='6'/></controllers>" +
"    <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 19;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals( 8, cluster.distributionBits());
assertEquals("We get the closest odd number", 5, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
// Exact hostnames depend on the deterministic allocation order of the mock provisioner.
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default02", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default04", clusterControllers.getContainers().get(2).getHostName());
assertEquals("default05", clusterControllers.getContainers().get(3).getHostName());
assertEquals("default07", clusterControllers.getContainers().get(4).getHostName());
assertEquals("default09", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getHostName());
assertEquals("default08", cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getHostName());
assertEquals("default06", cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getHostName());
assertEquals("default03", cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getHostName());
}
// 8 content nodes in 4 groups: the implicit cluster controller count is rounded
// to the closest odd number, yielding 3 controllers on content nodes.
@Test
public void testClusterControllersWithGroupSize2() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"    <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"    <redundancy>2</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes count='8' groups='4'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 18;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals("We get the closest odd number", 3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default03", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default08", clusterControllers.getContainers().get(2).getHostName());
}
// Requests 5 non-dedicated controllers for a 2-node content cluster; container
// clusters may supplement controller placement. Only the resulting count is asserted.
@Test
public void testClusterControllersCanSupplementWithAllContainerClusters() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo1'>" +
"    <nodes count='2'/>" +
"  </container>" +
"  <container version='1.0' id='foo2'>" +
"    <nodes count='1'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"    <redundancy>2</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <controllers><nodes dedicated='false' count='5'/></controllers>" +
"    <nodes count='2'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 5;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(1, clusterControllers.getContainers().size());
}
// Marks default09, default06 and default03 as retired and verifies cluster
// controllers are placed only on non-retired content nodes.
@Test
public void testClusterControllersAreNotPlacedOnRetiredNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"    <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"    <redundancy>2</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 19;
VespaModelTester tester = new VespaModelTester();
// Trailing varargs are the hostnames to mark as retired.
VespaModel model = tester.createModel(services, true, "default09", "default06", "default03");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(3, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("Skipping retired default09", "default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals("Skipping retired default03", "default04", clusterControllers.getContainers().get(1).getHostName());
assertEquals("Skipping retired default06", "default08", clusterControllers.getContainers().get(2).getHostName());
}
// A retired node keeps its slobrok: the slobrok cluster is the normal 3 plus the retired host.
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"    <nodes count='10'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 10;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true, "default09");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals("Includes retired node", 1+3, model.getAdmin().getSlobroks().size());
assertEquals("default01", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("default02", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("default10", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("Included in addition because it is retired", "default09", model.getAdmin().getSlobroks().get(3).getHostName());
}
// Same as above, but with two retired nodes that sort after the active slobrok hosts;
// retired hosts are appended after the 3 active ones.
@Test
public void testSlobroksClustersAreExpandedToIncludeRetiredNodesWhenRetiredComesLast() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"    <nodes count='10'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 10;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true, "default09", "default08");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals("Includes retired node", 3+2, model.getAdmin().getSlobroks().size());
assertEquals("default01", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("default02", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("default10", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("Included in addition because it is retired", "default08", model.getAdmin().getSlobroks().get(3).getHostName());
assertEquals("Included in addition because it is retired", "default09", model.getAdmin().getSlobroks().get(4).getHostName());
}
// Slobroks are distributed over both container clusters, and retired hosts in
// each cluster keep their slobrok in addition to the active ones.
@Test
public void testSlobroksAreSpreadOverAllContainerClusters() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='foo'>" +
"    <nodes count='10'/>" +
"  </container>" +
"  <container version='1.0' id='bar'>" +
"    <nodes count='3'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 13;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true, "default12", "default03", "default02");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
assertEquals("Includes retired node", 3+3, model.getAdmin().getSlobroks().size());
assertEquals("default04", model.getAdmin().getSlobroks().get(0).getHostName());
assertEquals("default13", model.getAdmin().getSlobroks().get(1).getHostName());
assertEquals("Included in addition because it is retired", "default12", model.getAdmin().getSlobroks().get(2).getHostName());
assertEquals("default01", model.getAdmin().getSlobroks().get(3).getHostName());
assertEquals("Included in addition because it is retired", "default02", model.getAdmin().getSlobroks().get(4).getHostName());
assertEquals("Included in addition because it is retired", "default03", model.getAdmin().getSlobroks().get(5).getHostName());
}
// In the hosted-vespa routing application, the node-admin container cluster must
// not host slobroks; all slobroks land in the 'routing' cluster.
@Test
public void testSlobroksAreSpreadOverAllContainerClustersExceptNodeAdmin() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'/>" +
"  <container version='1.0' id='routing'>" +
"    <nodes count='10'/>" +
"  </container>" +
"  <container version='1.0' id='node-admin'>" +
"    <nodes count='3'/>" +
"  </container>" +
"</services>";
int numberOfHosts = 13;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
// The special application id triggers the node-admin exclusion behavior.
tester.setApplicationId("hosted-vespa", "routing", "default");
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
Set<String> routingHosts = getClusterHostnames(model, "routing");
assertEquals(10, routingHosts.size());
Set<String> nodeAdminHosts = getClusterHostnames(model, "node-admin");
assertEquals(3, nodeAdminHosts.size());
Set<String> slobrokHosts = model.getAdmin().getSlobroks().stream()
.map(AbstractService::getHostName)
.collect(Collectors.toSet());
assertEquals(3, slobrokHosts.size());
assertThat(slobrokHosts, everyItem(isIn(routingHosts)));
assertThat(slobrokHosts, everyItem(not(isIn(nodeAdminHosts))));
}
/** Returns the hostnames of all hosts running at least one service whose "clustername" property equals the given cluster id. */
private Set<String> getClusterHostnames(VespaModel model, String clusterId) {
Optional<String> wantedCluster = Optional.of(clusterId);
return model.getHosts().stream()
.filter(hostInfo -> hostInfo.getServices().stream()
.anyMatch(service -> Objects.equals(service.getProperty("clustername"), wantedCluster)))
.map(HostInfo::getHostname)
.collect(Collectors.toSet());
}
/** A 2-node content cluster gets exactly one cluster controller. */
@Test
public void test2ContentNodesProduces1ClusterController() {
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <content version='1.0' id='bar'>" +
"    <redundancy>2</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes count='2'/>" +
"  </content>" +
"</services>";
int hostCount = 2;
VespaModelTester modelTester = new VespaModelTester();
modelTester.addHosts(hostCount);
VespaModel vespaModel = modelTester.createModel(servicesXml, true);
assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
ContentCluster barCluster = vespaModel.getContentClusters().get("bar");
ContainerCluster controllers = barCluster.getClusterControllers();
assertEquals(1, controllers.getContainers().size());
}
// With a container cluster present alongside a 2-node content cluster, the
// controller cluster still ends up with a single container.
@Test
public void test2ContentNodesWithContainerClusterProducesMixedClusterControllerCluster() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container version='1.0' id='foo'>" +
"    <nodes count='3'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"    <redundancy>2</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes count='2'/>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(5);
VespaModel model = tester.createModel(services, true);
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(1, clusterControllers.getContainers().size());
}
// Ignored: describes desired (not yet implemented) behavior where each content
// cluster's controller set is supplemented from the container cluster, without
// reusing the same container host for two controller clusters.
@Ignore
@Test
public void test2ContentNodesOn2ClustersWithContainerClusterProducesMixedClusterControllerCluster() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container version='1.0' id='container'>" +
"    <nodes count='3' flavor='container-node'/>" +
"  </container>" +
"  <content version='1.0' id='content1'>" +
"    <redundancy>2</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes count='2' flavor='content1-node'/>" +
"  </content>" +
"  <content version='1.0' id='content2'>" +
"    <redundancy>2</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes count='2' flavor='content2-node'/>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts("container-node", 3);
tester.addHosts("content1-node", 2);
tester.addHosts("content2-node", 2);
VespaModel model = tester.createModel(services, true);
ContentCluster cluster1 = model.getContentClusters().get("content1");
ContainerCluster clusterControllers1 = cluster1.getClusterControllers();
// Was asserting size 1, which contradicted the get(1)/get(2) accesses below and
// the parallel assertion for cluster 2; the intended controller count is 3.
assertEquals(3, clusterControllers1.getContainers().size());
assertEquals("content1-node0", clusterControllers1.getContainers().get(0).getHostName());
assertEquals("content1-node1", clusterControllers1.getContainers().get(1).getHostName());
assertEquals("container-node0", clusterControllers1.getContainers().get(2).getHostName());
ContentCluster cluster2 = model.getContentClusters().get("content2");
ContainerCluster clusterControllers2 = cluster2.getClusterControllers();
assertEquals(3, clusterControllers2.getContainers().size());
assertEquals("content2-node0", clusterControllers2.getContainers().get(0).getHostName());
assertEquals("content2-node1", clusterControllers2.getContainers().get(1).getHostName());
assertEquals("We do not pick the container used to supplement another cluster",
"container-node1", clusterControllers2.getContainers().get(2).getHostName());
}
// With <nodes dedicated='true'>, all 4 requested controllers get their own hosts
// (no odd-number adjustment here); hence 10 + 9 + 4 = 23 hosts.
@Test
public void testExplicitDedicatedClusterControllers() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <container version='1.0' id='foo'>" +
"    <nodes count='10'/>" +
"  </container>" +
"  <content version='1.0' id='bar'>" +
"    <redundancy>2</redundancy>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <controllers><nodes dedicated='true' count='4'/></controllers>" +
"    <nodes count='9' groups='3'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 23;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(4, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
// Dedicated controller hosts are allocated in descending hostname order.
assertEquals("default04", clusterControllers.getContainers().get(0).getHostName());
assertEquals("default03", clusterControllers.getContainers().get(1).getHostName());
assertEquals("default02", clusterControllers.getContainers().get(2).getHostName());
assertEquals("default01", clusterControllers.getContainers().get(3).getHostName());
}
// Requests 24 nodes in 3 groups but only 6 hosts exist (createModel with
// failOnOutOfCapacity=false): the model is scaled down to 3 groups of 2 nodes,
// and redundancy/searchable-copies are capped accordingly.
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingTooFewNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='3'>4</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24' groups='3'/>" +
"     <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
"  </content>" +
"</services>";
int numberOfHosts = 6;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
// Effective redundancy is per-group redundancy (2) times the 3 groups.
assertEquals(2*3, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(2*3, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(2*3, cluster.redundancy().effectiveReadyCopies());
assertEquals("2|2|*", cluster.getRootGroup().getPartitions().get());
assertEquals(0, cluster.getRootGroup().getNodes().size());
assertEquals(3, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getSubgroups().get(0).getIndex(), is("0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().size(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getSubgroups().get(0).getNodes().get(1).getConfigId(), is("bar/storage/1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getIndex(), is("1"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().size(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getDistributionKey(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(0).getConfigId(), is("bar/storage/2"));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getDistributionKey(), is(3));
assertThat(cluster.getRootGroup().getSubgroups().get(1).getNodes().get(1).getConfigId(), is("bar/storage/3"));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getIndex(), is("2"));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().size(), is(2));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getDistributionKey(), is(4));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(0).getConfigId(), is("bar/storage/4"));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getDistributionKey(), is(5));
assertThat(cluster.getRootGroup().getSubgroups().get(2).getNodes().get(1).getConfigId(), is("bar/storage/5"));
}
// Requests 24 flat nodes but only 4 hosts exist: the cluster shrinks to 4 nodes
// and redundancy, ready copies and dispatch groups are all capped at 4.
@Test
public void testUsingNodesCountAttributesAndGettingTooFewNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='8'>12</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24'/>" +
"     <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
"     <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
"  </content>" +
"</services>";
int numberOfHosts = 4;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(4, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(4, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(4, cluster.redundancy().effectiveReadyCopies());
assertEquals(4, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(4, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getNodes().size(), is(4));
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0"));
assertThat(cluster.getRootGroup().getNodes().get(1).getDistributionKey(), is(1));
assertThat(cluster.getRootGroup().getNodes().get(1).getConfigId(), is("bar/storage/1"));
assertThat(cluster.getRootGroup().getNodes().get(2).getDistributionKey(), is(2));
assertThat(cluster.getRootGroup().getNodes().get(2).getConfigId(), is("bar/storage/2"));
assertThat(cluster.getRootGroup().getNodes().get(3).getDistributionKey(), is(3));
assertThat(cluster.getRootGroup().getNodes().get(3).getConfigId(), is("bar/storage/3"));
}
// Degenerate scale-down: 24 nodes in 3 groups requested but only 1 host exists;
// grouping collapses and all redundancy values become 1.
@Test
public void testUsingNodesAndGroupCountAttributesAndGettingJustOneNode() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='3'>4</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24' groups='3'/>" +
"     <engine><proton><searchable-copies>3</searchable-copies></proton></engine>" +
"  </content>" +
"</services>";
int numberOfHosts = 1;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
ContainerCluster clusterControllers = cluster.getClusterControllers();
assertEquals(1, clusterControllers.getContainers().size());
assertEquals("bar-controllers", clusterControllers.getName());
assertEquals("default01", clusterControllers.getContainers().get(0).getHostName());
assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(1, cluster.redundancy().effectiveReadyCopies());
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(1, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0"));
}
// With required='true' on the nodes element, scale-down is not allowed:
// requesting 3 nodes with only 2 hosts must fail.
@Test(expected = IllegalArgumentException.class)
public void testRequiringMoreNodesThanAreAvailable() throws ParseException {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy>1</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='3' required='true'/>" +
"  </content>" +
"</services>";
int numberOfHosts = 2;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
tester.createModel(services, false);
}
// Flat 24-node request on a single available host: everything (redundancy,
// ready copies, dispatch groups) collapses to 1.
@Test
public void testUsingNodesCountAttributesAndGettingJustOneNode() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='3.0'>" +
"    <nodes count='3'/>" +
"  </admin>" +
"  <content version='1.0' id='bar'>" +
"     <redundancy reply-after='8'>12</redundancy>" +
"     <documents>" +
"       <document type='type1' mode='index'/>" +
"     </documents>" +
"     <nodes count='24'/>" +
"     <engine><proton><searchable-copies>5</searchable-copies></proton></engine>" +
"     <dispatch><num-dispatch-groups>7</num-dispatch-groups></dispatch>" +
"  </content>" +
"</services>";
int numberOfHosts = 1;
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, false);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
ContentCluster cluster = model.getContentClusters().get("bar");
assertEquals(1, cluster.redundancy().effectiveInitialRedundancy());
assertEquals(1, cluster.redundancy().effectiveFinalRedundancy());
assertEquals(1, cluster.redundancy().effectiveReadyCopies());
assertEquals(1, cluster.getSearch().getIndexed().getDispatchSpec().getGroups().size());
assertFalse(cluster.getRootGroup().getPartitions().isPresent());
assertEquals(1, cluster.getRootGroup().getNodes().size());
assertEquals(0, cluster.getRootGroup().getSubgroups().size());
assertThat(cluster.getRootGroup().getNodes().size(), is(1));
assertThat(cluster.getRootGroup().getNodes().get(0).getDistributionKey(), is(0));
assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0"));
}
// Every service type requests its own flavor; the tester registers exactly the
// matching capacity per flavor, so all 23 hosts must be used.
@Test
public void testRequestingSpecificFlavors() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <admin version='4.0'>" +
"    <logservers><nodes count='1' dedicated='true' flavor='logserver-flavor'/></logservers>" +
"    <slobroks><nodes count='2' dedicated='true' flavor='slobrok-flavor'/></slobroks>" +
"  </admin>" +
"  <container version='1.0' id='container'>" +
"    <nodes count='4' flavor='container-flavor'/>" +
"  </container>" +
"  <content version='1.0' id='foo'>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <controllers><nodes count='2' dedicated='true' flavor='controller-foo-flavor'/></controllers>" +
"    <nodes count='5' flavor='content-foo-flavor'/>" +
"  </content>" +
"  <content version='1.0' id='bar'>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <controllers><nodes count='3' dedicated='true' flavor='controller-bar-flavor'/></controllers>" +
"    <nodes count='6' flavor='content-bar-flavor'/>" +
"  </content>" +
"</services>";
int totalHosts = 23;
VespaModelTester tester = new VespaModelTester();
tester.addHosts("logserver-flavor", 1);
tester.addHosts("slobrok-flavor", 2);
tester.addHosts("container-flavor", 4);
tester.addHosts("controller-foo-flavor", 2);
tester.addHosts("content-foo-flavor", 5);
tester.addHosts("controller-bar-flavor", 3);
tester.addHosts("content-bar-flavor", 6);
VespaModel model = tester.createModel(services, true, 0);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(totalHosts));
}
/** A services file containing only a jdisc cluster still gets implicit admin services (logserver, slobroks). */
@Test
public void testJDiscOnly() {
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<jdisc version='1.0'>" +
"  <search/>" +
"  <nodes count='3'/>" +
"</jdisc>";
int hostCount = 3;
VespaModelTester modelTester = new VespaModelTester();
modelTester.addHosts(hostCount);
VespaModel vespaModel = modelTester.createModel(servicesXml, true);
assertEquals(hostCount, vespaModel.getRoot().getHostSystem().getHosts().size());
assertEquals(3, vespaModel.getContainerClusters().get("jdisc").getContainers().size());
assertNotNull(vespaModel.getAdmin().getLogserver());
assertEquals(3, vespaModel.getAdmin().getSlobroks().size());
}
// Legacy hostalias-based node references must still work when a provisioner is
// in use; a single host is allocated for both admin and the jdisc cluster.
@Test
public void testUsingHostaliasWithProvisioner() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"<admin version='2.0'>" +
"  <adminserver hostalias='node1'/>\n"+
"</admin>\n" +
"<jdisc id='mydisc' version='1.0'>" +
"  <handler id='myHandler'>" +
"    <component id='injected' />" +
"  </handler>" +
"  <nodes>" +
"    <node hostalias='node1'/>" +
"  </nodes>" +
"</jdisc>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
}
/** Standalone-container style XML (explicit http server on the default web service port) is accepted on hosted Vespa. */
@Test
public void testThatStandaloneSyntaxWorksOnHostedVespa() {
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>" +
"<jdisc id='foo' version='1.0'>" +
"  <http>" +
"    <server id='server1' port='" + getDefaults().vespaWebServicePort() + "' />" +
"  </http>" +
"</jdisc>";
VespaModelTester modelTester = new VespaModelTester();
modelTester.addHosts(1);
VespaModel vespaModel = modelTester.createModel(servicesXml, true);
assertEquals(1, vespaModel.getHosts().size());
assertEquals(1, vespaModel.getContainerClusters().size());
}
// Clusters declared without a <nodes> tag default to a single node each,
// and both clusters share the one available host.
@Test
public void testNoNodeTagMeans1Node() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <jdisc id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"  </jdisc>" +
"  <content version='1.0' id='bar'>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes());
}
/** A container cluster without a nodes tag defaults to a single node (no content cluster involved). */
@Test
public void testNoNodeTagMeans1NodeNoContent() {
String servicesXml =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <jdisc id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"  </jdisc>" +
"</services>";
VespaModelTester modelTester = new VespaModelTester();
modelTester.addHosts(1);
VespaModel vespaModel = modelTester.createModel(servicesXml, true);
assertEquals(1, vespaModel.getRoot().getHostSystem().getHosts().size());
assertEquals(1, vespaModel.getAdmin().getSlobroks().size());
assertEquals(1, vespaModel.getContainerClusters().get("foo").getContainers().size());
}
// Same default-to-one-node behavior in the non-hosted case (tester.setHosted(false)).
@Test
public void testNoNodeTagMeans1NodeNonHosted() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <jdisc id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"  </jdisc>" +
"  <content version='1.0' id='bar'>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(false);
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().recursiveGetNodes().size());
}
// Non-hosted, with explicit hostalias node references: both clusters share one host.
@Test
public void testSingleNodeNonHosted() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
"  <jdisc id='foo' version='1.0'>" +
"    <search/>" +
"    <document-api/>" +
"    <nodes><node hostalias='foo'/></nodes>"+
"  </jdisc>" +
"  <content version='1.0' id='bar'>" +
"    <documents>" +
"      <document type='type1' mode='index'/>" +
"    </documents>" +
"    <nodes><node hostalias='foo' distribution-key='0'/></nodes>"+
"  </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
tester.setHosted(false);
tester.addHosts(1);
VespaModel model = tester.createModel(services, true);
assertEquals(1, model.getRoot().getHostSystem().getHosts().size());
assertEquals(1, model.getAdmin().getSlobroks().size());
assertEquals(1, model.getContainerClusters().get("foo").getContainers().size());
assertEquals(1, model.getContentClusters().get("bar").getRootGroup().countNodes());
}
/**
 * Recreate the combination used in some factory tests: multitenant but not
 * hosted, built without a provisioner. Two content nodes on one host still
 * yield a single cluster controller.
 */
@Test
public void testMultitenantButNotHosted() {
String services =
"<?xml version='1.0' encoding='UTF-8' ?>" +
"<services version='1.0'>" +
"  <admin version='2.0'>" +
"    <adminserver hostalias='node1'/>" +
"  </admin>"  +
"  <jdisc id='default' version='1.0'>" +
"    <search/>" +
"    <nodes>" +
"      <node hostalias='node1'/>" +
"    </nodes>" +
"  </jdisc>" +
"  <content id='storage' version='1.0'>" +
"    <redundancy>2</redundancy>" +
"    <group>" +
"      <node distribution-key='0' hostalias='node1'/>" +
"      <node distribution-key='1' hostalias='node1'/>" +
"    </group>" +
"    <tuning>" +
"      <cluster-controller>" +
"        <transition-time>0</transition-time>" +
"      </cluster-controller>" +
"    </tuning>" +
"    <documents>" +
"      <document mode='store-only' type='type1'/>" +
"    </documents>" +
"    <engine>" +
"      <proton/>" +
"    </engine>" +
"  </content>" +
" </services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(1));
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(2, content.getRootGroup().getNodes().size());
ContainerCluster controller = content.getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
@Test
@Test
public void testMultitenantButNotHostedSharedContentNode() {
String services =
"<?xml version='1.0' encoding='UTF-8' ?>" +
"<services version='1.0'>" +
" <admin version='2.0'>" +
" <adminserver hostalias='node1'/>" +
" </admin>" +
" <jdisc id='default' version='1.0'>" +
" <search/>" +
" <nodes>" +
" <node hostalias='node1'/>" +
" </nodes>" +
" </jdisc>" +
" <content id='storage' version='1.0'>" +
" <redundancy>2</redundancy>" +
" <group>" +
" <node distribution-key='0' hostalias='node1'/>" +
" <node distribution-key='1' hostalias='node1'/>" +
" </group>" +
" <tuning>" +
" <cluster-controller>" +
" <transition-time>0</transition-time>" +
" </cluster-controller>" +
" </tuning>" +
" <documents>" +
" <document mode='store-only' type='type1'/>" +
" </documents>" +
" <engine>" +
" <proton/>" +
" </engine>" +
" </content>" +
" <content id='search' version='1.0'>" +
" <redundancy>2</redundancy>" +
" <group>" +
" <node distribution-key='0' hostalias='node1'/>" +
" </group>" +
" <documents>" +
" <document type='type1'/>" +
" </documents>" +
" </content>" +
" </services>";
VespaModel model = createNonProvisionedMultitenantModel(services);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(1));
ContentCluster content = model.getContentClusters().get("storage");
assertEquals(2, content.getRootGroup().getNodes().size());
ContainerCluster controller = content.getClusterControllers();
assertEquals(1, controller.getContainers().size());
}
private VespaModel createNonProvisionedMultitenantModel(String services) {
return createNonProvisionedModel(true, null, services);
}
private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) {
VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(hosts, services, ApplicationPackageUtils.generateSearchDefinition("type1"));
ApplicationPackage appPkg = modelCreatorWithMockPkg.appPkg;
DeployState deployState = new DeployState.Builder().applicationPackage(appPkg).
properties((new DeployProperties.Builder()).multitenant(multitenant).build()).
build(true);
return modelCreatorWithMockPkg.create(false, deployState);
}
@Test
public void testThatTldConfigIdsAreDeterministic() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services>" +
" <admin version='4.0'/>" +
" <jdisc version='1.0' id='jdisc0'>" +
" <search/>" +
" <nodes count='2'/>" +
" </jdisc>" +
" <jdisc version='1.0' id='jdisc1'>" +
" <search/>" +
" <nodes count='2'/>" +
" </jdisc>" +
" <content version='1.0' id='content0'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
" <content version='1.0' id='content1'>" +
" <redundancy>2</redundancy>" +
" <documents>" +
" <document type='type1' mode='index'/>" +
" </documents>" +
" <nodes count='2'/>" +
" </content>" +
"</services>";
int numberOfHosts = 8;
{
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts);
VespaModel model = tester.createModel(services, true);
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
Map<String, ContentCluster> contentClusters = model.getContentClusters();
assertEquals(2, contentClusters.size());
checkThatTldAndContainerRunningOnSameHostHaveSameId(
model.getContainerClusters().values(),
model.getContentClusters().values(),
0);
}
{
VespaModelTester tester = new VespaModelTester();
tester.addHosts(numberOfHosts + 1);
VespaModel model = tester.createModel(services, true, 1, "default0");
assertThat(model.getRoot().getHostSystem().getHosts().size(), is(numberOfHosts));
Map<String, ContentCluster> contentClusters = model.getContentClusters();
assertEquals(2, contentClusters.size());
checkThatTldAndContainerRunningOnSameHostHaveSameId(
model.getContainerClusters().values(),
model.getContentClusters().values(),
1);
}
}
private void checkThatTldAndContainerRunningOnSameHostHaveSameId(Collection<ContainerCluster> containerClusters,
Collection<ContentCluster> contentClusters,
int startIndexForContainerIds) {
for (ContentCluster contentCluster : contentClusters) {
String contentClusterName = contentCluster.getName();
int i = 0;
for (ContainerCluster containerCluster : containerClusters) {
String containerClusterName = containerCluster.getName();
for (int j = 0; j < 2; j++) {
Dispatch tld = contentCluster.getSearch().getIndexed().getTLDs().get(2 * i + j);
Container container = containerCluster.getContainers().get(j);
int containerConfigIdIndex = j + startIndexForContainerIds;
assertEquals(container.getHostName(), tld.getHostname());
assertEquals(contentClusterName + "/search/cluster." + contentClusterName + "/tlds/" +
containerClusterName + "." + containerConfigIdIndex + ".tld." + containerConfigIdIndex,
tld.getConfigId());
assertEquals(containerClusterName + "/" + "container." + containerConfigIdIndex,
container.getConfigId());
}
i++;
}
}
}
private int physicalMemoryPercentage(ContainerCluster cluster) {
QrStartConfig.Builder b = new QrStartConfig.Builder();
cluster.getSearch().getConfig(b);
return new QrStartConfig(b).jvm().heapSizeAsPercentageOfPhysicalMemory();
}
@Test
public void require_that_proton_config_is_tuned_based_on_node_flavor() {
String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
"<services>",
" <content version='1.0' id='test'>",
" <documents>",
" <document type='type1' mode='index'/>",
" </documents>",
" <nodes count='2' flavor='content-test-flavor'/>",
" </content>",
"</services>");
VespaModelTester tester = new VespaModelTester();
tester.addHosts(createFlavorFromDiskSetting("content-test-flavor", false), 2);
VespaModel model = tester.createModel(services, true, 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
assertEquals(2, cluster.getSearchNodes().size());
assertEquals(40, getProtonConfig(cluster, 0).hwinfo().disk().writespeed(), 0.001);
assertEquals(40, getProtonConfig(cluster, 1).hwinfo().disk().writespeed(), 0.001);
}
private static Flavor createFlavorFromDiskSetting(String name, boolean fastDisk) {
return new Flavor(new FlavorsConfig.Flavor(new FlavorsConfig.Flavor.Builder().
name(name).fastDisk(fastDisk)));
}
private static ProtonConfig getProtonConfig(ContentSearchCluster cluster, int searchNodeIdx) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
List<SearchNode> searchNodes = cluster.getSearchNodes();
assertTrue(searchNodeIdx < searchNodes.size());
searchNodes.get(searchNodeIdx).getConfig(builder);
return new ProtonConfig(builder);
}
@Test
public void require_that_config_override_and_explicit_proton_tuning_have_precedence_over_default_node_flavor_tuning() {
String services = joinLines("<?xml version='1.0' encoding='utf-8' ?>",
"<services>",
" <content version='1.0' id='test'>",
" <config name='vespa.config.search.core.proton'>",
" <flush><memory><maxtlssize>2000</maxtlssize></memory></flush>",
" </config>",
" <documents>",
" <document type='type1' mode='index'/>",
" </documents>",
" <nodes count='1' flavor='content-test-flavor'/>",
" <engine>",
" <proton>",
" <tuning>",
" <searchnode>",
" <flushstrategy>",
" <native>",
" <total>",
" <maxmemorygain>1000</maxmemorygain>",
" </total>",
" </native>",
" </flushstrategy>",
" </searchnode>",
" </tuning>",
" </proton>",
" </engine>",
" </content>",
"</services>");
VespaModelTester tester = new VespaModelTester();
tester.addHosts("default", 1);
tester.addHosts(createFlavorFromMemoryAndDisk("content-test-flavor", 128, 100), 1);
VespaModel model = tester.createModel(services, true, 0);
ContentSearchCluster cluster = model.getContentClusters().get("test").getSearch();
ProtonConfig cfg = getProtonConfig(model, cluster.getSearchNodes().get(0).getConfigId());
assertEquals(2000, cfg.flush().memory().maxtlssize());
assertEquals(1000, cfg.flush().memory().maxmemory());
assertEquals((long) 16 * GB, cfg.flush().memory().each().maxmemory());
}
private static long GB = 1024 * 1024 * 1024;
private static Flavor createFlavorFromMemoryAndDisk(String name, int memoryGb, int diskGb) {
return new Flavor(new FlavorsConfig.Flavor(new FlavorsConfig.Flavor.Builder().
name(name).minMainMemoryAvailableGb(memoryGb).minDiskAvailableGb(diskGb)));
}
private static ProtonConfig getProtonConfig(VespaModel model, String configId) {
ProtonConfig.Builder builder = new ProtonConfig.Builder();
model.getConfig(builder, configId);
return new ProtonConfig(builder);
}
} |
Isn't the openstackid supposed to be the instance id in aws? | private static NodeRepositoryNode nodeRepositoryNodeFromAddNode(AddNode addNode) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = "fake-" + addNode.hostname;
node.hostname = addNode.hostname;
node.parentHostname = addNode.parentHostname.orElse(null);
node.flavor = addNode.nodeFlavor;
node.type = addNode.nodeType.name();
node.ipAddresses = addNode.ipAddresses;
node.additionalIpAddresses = addNode.additionalIpAddresses;
return node;
} | node.openStackId = "fake-" + addNode.hostname; | private static NodeRepositoryNode nodeRepositoryNodeFromAddNode(AddNode addNode) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = "fake-" + addNode.hostname;
node.hostname = addNode.hostname;
node.parentHostname = addNode.parentHostname.orElse(null);
node.flavor = addNode.nodeFlavor;
node.type = addNode.nodeType.name();
node.ipAddresses = addNode.ipAddresses;
node.additionalIpAddresses = addNode.additionalIpAddresses;
return node;
} | class RealNodeRepository implements NodeRepository {
private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(RealNodeRepository.class);
private final ConfigServerApi configServerApi;
public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
}
@Override
public void addNodes(List<AddNode> nodes) {
List<NodeRepositoryNode> nodesToPost = nodes.stream()
.map(RealNodeRepository::nodeRepositoryNodeFromAddNode)
.collect(Collectors.toList());
NodeMessageResponse response = configServerApi.post("/nodes/v2/node", nodesToPost, NodeMessageResponse.class);
if (!Strings.isNullOrEmpty(response.errorCode)) {
throw new RuntimeException("Failed to add nodes to node-repo: " + response.message + " " + response.errorCode);
}
}
@Override
public List<NodeSpec> getNodes(String baseHostName) {
String path = "/nodes/v2/node/?recursive=true&parentHost=" + baseHostName;
final GetNodesResponse nodesForHost = configServerApi.get(path, GetNodesResponse.class);
return nodesForHost.nodes.stream()
.map(RealNodeRepository::createNodeSpec)
.collect(Collectors.toList());
}
@Override
public Optional<NodeSpec> getNode(String hostName) {
try {
NodeRepositoryNode nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName,
NodeRepositoryNode.class);
if (nodeResponse == null) {
return Optional.empty();
}
return Optional.of(createNodeSpec(nodeResponse));
} catch (HttpException.NotFoundException | HttpException.ForbiddenException e) {
return Optional.empty();
}
}
/**
* Get all ACLs that belongs to a hostname. Usually this is a parent host and all
* ACLs for child nodes are returned.
*/
@Override
public Map<String, Acl> getAcls(String hostName) {
try {
final String path = String.format("/nodes/v2/acl/%s?children=true", hostName);
final GetAclResponse response = configServerApi.get(path, GetAclResponse.class);
Map<String, List<Integer>> trustedPorts = response.trustedPorts.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Port::getTrustedBy,
Collectors.mapping(port -> port.port, Collectors.toList())));
Map<String, List<InetAddress>> trustedNodes = response.trustedNodes.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Node::getTrustedBy,
Collectors.mapping(node -> InetAddresses.forString(node.ipAddress), Collectors.toList())));
return Stream.of(trustedNodes.keySet(), trustedPorts.keySet())
.flatMap(Set::stream)
.distinct()
.collect(Collectors.toMap(
Function.identity(),
hostname -> new Acl(trustedPorts.get(hostname), trustedNodes.get(hostname))));
} catch (HttpException.NotFoundException e) {
NODE_ADMIN_LOGGER.warning("Failed to fetch ACLs for " + hostName + " No ACL will be applied");
}
return Collections.emptyMap();
}
@Override
public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) {
NodeMessageResponse response = configServerApi.patch(
"/nodes/v2/node/" + hostName,
nodeRepositoryNodeFromNodeAttributes(nodeAttributes),
NodeMessageResponse.class);
if (!Strings.isNullOrEmpty(response.errorCode)) {
throw new RuntimeException("Unexpected message " + response.message + " " + response.errorCode);
}
}
@Override
public void setNodeState(String hostName, Node.State nodeState) {
String state = nodeState.name();
NodeMessageResponse response = configServerApi.put(
"/nodes/v2/state/" + state + "/" + hostName,
Optional.empty(), /* body */
NodeMessageResponse.class);
NODE_ADMIN_LOGGER.info(response.message);
if (response.errorCode == null || response.errorCode.isEmpty()) {
return;
}
throw new RuntimeException("Unexpected message " + response.message + " " + response.errorCode);
}
private static NodeSpec createNodeSpec(NodeRepositoryNode node)
throws IllegalArgumentException, NullPointerException {
Objects.requireNonNull(node.type, "Unknown node type");
NodeType nodeType = NodeType.valueOf(node.type);
Objects.requireNonNull(node.state, "Unknown node state");
Node.State nodeState = Node.State.valueOf(node.state);
if (nodeState == Node.State.active) {
Objects.requireNonNull(node.wantedVespaVersion, "Unknown vespa version for active node");
Objects.requireNonNull(node.wantedDockerImage, "Unknown docker image for active node");
Objects.requireNonNull(node.restartGeneration, "Unknown restartGeneration for active node");
Objects.requireNonNull(node.currentRestartGeneration, "Unknown currentRestartGeneration for active node");
}
String hostName = Objects.requireNonNull(node.hostname, "hostname is null");
NodeSpec.Owner owner = null;
if (node.owner != null) {
owner = new NodeSpec.Owner(node.owner.tenant, node.owner.application, node.owner.instance);
}
NodeSpec.Membership membership = null;
if (node.membership != null) {
membership = new NodeSpec.Membership(node.membership.clusterType, node.membership.clusterId,
node.membership.group, node.membership.index, node.membership.retired);
}
return new NodeSpec(
hostName,
Optional.ofNullable(node.wantedDockerImage).map(DockerImage::new),
Optional.ofNullable(node.currentDockerImage).map(DockerImage::new),
nodeState,
nodeType,
node.flavor,
node.canonicalFlavor,
Optional.ofNullable(node.wantedVespaVersion),
Optional.ofNullable(node.vespaVersion),
Optional.ofNullable(node.allowedToBeDown),
Optional.ofNullable(owner),
Optional.ofNullable(membership),
Optional.ofNullable(node.restartGeneration),
Optional.ofNullable(node.currentRestartGeneration),
node.rebootGeneration,
node.currentRebootGeneration,
node.minCpuCores,
node.minMainMemoryAvailableGb,
node.minDiskAvailableGb,
node.fastDisk,
node.ipAddresses,
Optional.ofNullable(node.hardwareDivergence),
Optional.ofNullable(node.parentHostname));
}
private static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.currentDockerImage = Optional.ofNullable(nodeAttributes.getDockerImage()).map(DockerImage::asString).orElse(null);
node.currentRestartGeneration = nodeAttributes.getRestartGeneration();
node.currentRebootGeneration = nodeAttributes.getRebootGeneration();
node.hardwareDivergence = nodeAttributes.getHardwareDivergence();
return node;
}
} | class RealNodeRepository implements NodeRepository {
private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(RealNodeRepository.class);
private final ConfigServerApi configServerApi;
public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
}
@Override
public void addNodes(List<AddNode> nodes) {
List<NodeRepositoryNode> nodesToPost = nodes.stream()
.map(RealNodeRepository::nodeRepositoryNodeFromAddNode)
.collect(Collectors.toList());
NodeMessageResponse response = configServerApi.post("/nodes/v2/node", nodesToPost, NodeMessageResponse.class);
if (!Strings.isNullOrEmpty(response.errorCode)) {
throw new RuntimeException("Failed to add nodes to node-repo: " + response.message + " " + response.errorCode);
}
}
@Override
public List<NodeSpec> getNodes(String baseHostName) {
String path = "/nodes/v2/node/?recursive=true&parentHost=" + baseHostName;
final GetNodesResponse nodesForHost = configServerApi.get(path, GetNodesResponse.class);
return nodesForHost.nodes.stream()
.map(RealNodeRepository::createNodeSpec)
.collect(Collectors.toList());
}
@Override
public Optional<NodeSpec> getNode(String hostName) {
try {
NodeRepositoryNode nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName,
NodeRepositoryNode.class);
if (nodeResponse == null) {
return Optional.empty();
}
return Optional.of(createNodeSpec(nodeResponse));
} catch (HttpException.NotFoundException | HttpException.ForbiddenException e) {
return Optional.empty();
}
}
/**
* Get all ACLs that belongs to a hostname. Usually this is a parent host and all
* ACLs for child nodes are returned.
*/
@Override
public Map<String, Acl> getAcls(String hostName) {
try {
final String path = String.format("/nodes/v2/acl/%s?children=true", hostName);
final GetAclResponse response = configServerApi.get(path, GetAclResponse.class);
Map<String, List<Integer>> trustedPorts = response.trustedPorts.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Port::getTrustedBy,
Collectors.mapping(port -> port.port, Collectors.toList())));
Map<String, List<InetAddress>> trustedNodes = response.trustedNodes.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Node::getTrustedBy,
Collectors.mapping(node -> InetAddresses.forString(node.ipAddress), Collectors.toList())));
return Stream.of(trustedNodes.keySet(), trustedPorts.keySet())
.flatMap(Set::stream)
.distinct()
.collect(Collectors.toMap(
Function.identity(),
hostname -> new Acl(trustedPorts.get(hostname), trustedNodes.get(hostname))));
} catch (HttpException.NotFoundException e) {
NODE_ADMIN_LOGGER.warning("Failed to fetch ACLs for " + hostName + " No ACL will be applied");
}
return Collections.emptyMap();
}
@Override
public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) {
NodeMessageResponse response = configServerApi.patch(
"/nodes/v2/node/" + hostName,
nodeRepositoryNodeFromNodeAttributes(nodeAttributes),
NodeMessageResponse.class);
if (!Strings.isNullOrEmpty(response.errorCode)) {
throw new RuntimeException("Unexpected message " + response.message + " " + response.errorCode);
}
}
@Override
public void setNodeState(String hostName, Node.State nodeState) {
String state = nodeState.name();
NodeMessageResponse response = configServerApi.put(
"/nodes/v2/state/" + state + "/" + hostName,
Optional.empty(), /* body */
NodeMessageResponse.class);
NODE_ADMIN_LOGGER.info(response.message);
if (response.errorCode == null || response.errorCode.isEmpty()) {
return;
}
throw new RuntimeException("Unexpected message " + response.message + " " + response.errorCode);
}
private static NodeSpec createNodeSpec(NodeRepositoryNode node)
throws IllegalArgumentException, NullPointerException {
Objects.requireNonNull(node.type, "Unknown node type");
NodeType nodeType = NodeType.valueOf(node.type);
Objects.requireNonNull(node.state, "Unknown node state");
Node.State nodeState = Node.State.valueOf(node.state);
if (nodeState == Node.State.active) {
Objects.requireNonNull(node.wantedVespaVersion, "Unknown vespa version for active node");
Objects.requireNonNull(node.wantedDockerImage, "Unknown docker image for active node");
Objects.requireNonNull(node.restartGeneration, "Unknown restartGeneration for active node");
Objects.requireNonNull(node.currentRestartGeneration, "Unknown currentRestartGeneration for active node");
}
String hostName = Objects.requireNonNull(node.hostname, "hostname is null");
NodeSpec.Owner owner = null;
if (node.owner != null) {
owner = new NodeSpec.Owner(node.owner.tenant, node.owner.application, node.owner.instance);
}
NodeSpec.Membership membership = null;
if (node.membership != null) {
membership = new NodeSpec.Membership(node.membership.clusterType, node.membership.clusterId,
node.membership.group, node.membership.index, node.membership.retired);
}
return new NodeSpec(
hostName,
Optional.ofNullable(node.wantedDockerImage).map(DockerImage::new),
Optional.ofNullable(node.currentDockerImage).map(DockerImage::new),
nodeState,
nodeType,
node.flavor,
node.canonicalFlavor,
Optional.ofNullable(node.wantedVespaVersion),
Optional.ofNullable(node.vespaVersion),
Optional.ofNullable(node.allowedToBeDown),
Optional.ofNullable(owner),
Optional.ofNullable(membership),
Optional.ofNullable(node.restartGeneration),
Optional.ofNullable(node.currentRestartGeneration),
node.rebootGeneration,
node.currentRebootGeneration,
node.minCpuCores,
node.minMainMemoryAvailableGb,
node.minDiskAvailableGb,
node.fastDisk,
node.ipAddresses,
Optional.ofNullable(node.hardwareDivergence),
Optional.ofNullable(node.parentHostname));
}
private static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.currentDockerImage = Optional.ofNullable(nodeAttributes.getDockerImage()).map(DockerImage::asString).orElse(null);
node.currentRestartGeneration = nodeAttributes.getRestartGeneration();
node.currentRebootGeneration = nodeAttributes.getRebootGeneration();
node.hardwareDivergence = nodeAttributes.getHardwareDivergence();
return node;
}
} |
I was hoping this was only needed for tenant hosts since we dont create own certificate for the config container, not sure. Checking with Morten. Either way, there is no easy way to achieve this here. We would have to set up CKMS and give cfghost permission to talk to AWS... :( | private static NodeRepositoryNode nodeRepositoryNodeFromAddNode(AddNode addNode) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = "fake-" + addNode.hostname;
node.hostname = addNode.hostname;
node.parentHostname = addNode.parentHostname.orElse(null);
node.flavor = addNode.nodeFlavor;
node.type = addNode.nodeType.name();
node.ipAddresses = addNode.ipAddresses;
node.additionalIpAddresses = addNode.additionalIpAddresses;
return node;
} | node.openStackId = "fake-" + addNode.hostname; | private static NodeRepositoryNode nodeRepositoryNodeFromAddNode(AddNode addNode) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = "fake-" + addNode.hostname;
node.hostname = addNode.hostname;
node.parentHostname = addNode.parentHostname.orElse(null);
node.flavor = addNode.nodeFlavor;
node.type = addNode.nodeType.name();
node.ipAddresses = addNode.ipAddresses;
node.additionalIpAddresses = addNode.additionalIpAddresses;
return node;
} | class RealNodeRepository implements NodeRepository {
private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(RealNodeRepository.class);
private final ConfigServerApi configServerApi;
public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
}
@Override
public void addNodes(List<AddNode> nodes) {
List<NodeRepositoryNode> nodesToPost = nodes.stream()
.map(RealNodeRepository::nodeRepositoryNodeFromAddNode)
.collect(Collectors.toList());
NodeMessageResponse response = configServerApi.post("/nodes/v2/node", nodesToPost, NodeMessageResponse.class);
if (!Strings.isNullOrEmpty(response.errorCode)) {
throw new RuntimeException("Failed to add nodes to node-repo: " + response.message + " " + response.errorCode);
}
}
@Override
public List<NodeSpec> getNodes(String baseHostName) {
String path = "/nodes/v2/node/?recursive=true&parentHost=" + baseHostName;
final GetNodesResponse nodesForHost = configServerApi.get(path, GetNodesResponse.class);
return nodesForHost.nodes.stream()
.map(RealNodeRepository::createNodeSpec)
.collect(Collectors.toList());
}
@Override
public Optional<NodeSpec> getNode(String hostName) {
try {
NodeRepositoryNode nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName,
NodeRepositoryNode.class);
if (nodeResponse == null) {
return Optional.empty();
}
return Optional.of(createNodeSpec(nodeResponse));
} catch (HttpException.NotFoundException | HttpException.ForbiddenException e) {
return Optional.empty();
}
}
/**
* Get all ACLs that belongs to a hostname. Usually this is a parent host and all
* ACLs for child nodes are returned.
*/
@Override
public Map<String, Acl> getAcls(String hostName) {
try {
final String path = String.format("/nodes/v2/acl/%s?children=true", hostName);
final GetAclResponse response = configServerApi.get(path, GetAclResponse.class);
Map<String, List<Integer>> trustedPorts = response.trustedPorts.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Port::getTrustedBy,
Collectors.mapping(port -> port.port, Collectors.toList())));
Map<String, List<InetAddress>> trustedNodes = response.trustedNodes.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Node::getTrustedBy,
Collectors.mapping(node -> InetAddresses.forString(node.ipAddress), Collectors.toList())));
return Stream.of(trustedNodes.keySet(), trustedPorts.keySet())
.flatMap(Set::stream)
.distinct()
.collect(Collectors.toMap(
Function.identity(),
hostname -> new Acl(trustedPorts.get(hostname), trustedNodes.get(hostname))));
} catch (HttpException.NotFoundException e) {
NODE_ADMIN_LOGGER.warning("Failed to fetch ACLs for " + hostName + " No ACL will be applied");
}
return Collections.emptyMap();
}
@Override
public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) {
NodeMessageResponse response = configServerApi.patch(
"/nodes/v2/node/" + hostName,
nodeRepositoryNodeFromNodeAttributes(nodeAttributes),
NodeMessageResponse.class);
if (!Strings.isNullOrEmpty(response.errorCode)) {
throw new RuntimeException("Unexpected message " + response.message + " " + response.errorCode);
}
}
@Override
public void setNodeState(String hostName, Node.State nodeState) {
String state = nodeState.name();
NodeMessageResponse response = configServerApi.put(
"/nodes/v2/state/" + state + "/" + hostName,
Optional.empty(), /* body */
NodeMessageResponse.class);
NODE_ADMIN_LOGGER.info(response.message);
if (response.errorCode == null || response.errorCode.isEmpty()) {
return;
}
throw new RuntimeException("Unexpected message " + response.message + " " + response.errorCode);
}
private static NodeSpec createNodeSpec(NodeRepositoryNode node)
throws IllegalArgumentException, NullPointerException {
Objects.requireNonNull(node.type, "Unknown node type");
NodeType nodeType = NodeType.valueOf(node.type);
Objects.requireNonNull(node.state, "Unknown node state");
Node.State nodeState = Node.State.valueOf(node.state);
if (nodeState == Node.State.active) {
Objects.requireNonNull(node.wantedVespaVersion, "Unknown vespa version for active node");
Objects.requireNonNull(node.wantedDockerImage, "Unknown docker image for active node");
Objects.requireNonNull(node.restartGeneration, "Unknown restartGeneration for active node");
Objects.requireNonNull(node.currentRestartGeneration, "Unknown currentRestartGeneration for active node");
}
String hostName = Objects.requireNonNull(node.hostname, "hostname is null");
NodeSpec.Owner owner = null;
if (node.owner != null) {
owner = new NodeSpec.Owner(node.owner.tenant, node.owner.application, node.owner.instance);
}
NodeSpec.Membership membership = null;
if (node.membership != null) {
membership = new NodeSpec.Membership(node.membership.clusterType, node.membership.clusterId,
node.membership.group, node.membership.index, node.membership.retired);
}
return new NodeSpec(
hostName,
Optional.ofNullable(node.wantedDockerImage).map(DockerImage::new),
Optional.ofNullable(node.currentDockerImage).map(DockerImage::new),
nodeState,
nodeType,
node.flavor,
node.canonicalFlavor,
Optional.ofNullable(node.wantedVespaVersion),
Optional.ofNullable(node.vespaVersion),
Optional.ofNullable(node.allowedToBeDown),
Optional.ofNullable(owner),
Optional.ofNullable(membership),
Optional.ofNullable(node.restartGeneration),
Optional.ofNullable(node.currentRestartGeneration),
node.rebootGeneration,
node.currentRebootGeneration,
node.minCpuCores,
node.minMainMemoryAvailableGb,
node.minDiskAvailableGb,
node.fastDisk,
node.ipAddresses,
Optional.ofNullable(node.hardwareDivergence),
Optional.ofNullable(node.parentHostname));
}
private static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.currentDockerImage = Optional.ofNullable(nodeAttributes.getDockerImage()).map(DockerImage::asString).orElse(null);
node.currentRestartGeneration = nodeAttributes.getRestartGeneration();
node.currentRebootGeneration = nodeAttributes.getRebootGeneration();
node.hardwareDivergence = nodeAttributes.getHardwareDivergence();
return node;
}
} | class RealNodeRepository implements NodeRepository {
private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(RealNodeRepository.class);
private final ConfigServerApi configServerApi;
public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
}
@Override
public void addNodes(List<AddNode> nodes) {
List<NodeRepositoryNode> nodesToPost = nodes.stream()
.map(RealNodeRepository::nodeRepositoryNodeFromAddNode)
.collect(Collectors.toList());
NodeMessageResponse response = configServerApi.post("/nodes/v2/node", nodesToPost, NodeMessageResponse.class);
if (!Strings.isNullOrEmpty(response.errorCode)) {
throw new RuntimeException("Failed to add nodes to node-repo: " + response.message + " " + response.errorCode);
}
}
@Override
public List<NodeSpec> getNodes(String baseHostName) {
String path = "/nodes/v2/node/?recursive=true&parentHost=" + baseHostName;
final GetNodesResponse nodesForHost = configServerApi.get(path, GetNodesResponse.class);
return nodesForHost.nodes.stream()
.map(RealNodeRepository::createNodeSpec)
.collect(Collectors.toList());
}
@Override
public Optional<NodeSpec> getNode(String hostName) {
try {
NodeRepositoryNode nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName,
NodeRepositoryNode.class);
if (nodeResponse == null) {
return Optional.empty();
}
return Optional.of(createNodeSpec(nodeResponse));
} catch (HttpException.NotFoundException | HttpException.ForbiddenException e) {
return Optional.empty();
}
}
/**
* Get all ACLs that belongs to a hostname. Usually this is a parent host and all
* ACLs for child nodes are returned.
*/
@Override
public Map<String, Acl> getAcls(String hostName) {
try {
final String path = String.format("/nodes/v2/acl/%s?children=true", hostName);
final GetAclResponse response = configServerApi.get(path, GetAclResponse.class);
Map<String, List<Integer>> trustedPorts = response.trustedPorts.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Port::getTrustedBy,
Collectors.mapping(port -> port.port, Collectors.toList())));
Map<String, List<InetAddress>> trustedNodes = response.trustedNodes.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Node::getTrustedBy,
Collectors.mapping(node -> InetAddresses.forString(node.ipAddress), Collectors.toList())));
return Stream.of(trustedNodes.keySet(), trustedPorts.keySet())
.flatMap(Set::stream)
.distinct()
.collect(Collectors.toMap(
Function.identity(),
hostname -> new Acl(trustedPorts.get(hostname), trustedNodes.get(hostname))));
} catch (HttpException.NotFoundException e) {
NODE_ADMIN_LOGGER.warning("Failed to fetch ACLs for " + hostName + " No ACL will be applied");
}
return Collections.emptyMap();
}
@Override
public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) {
NodeMessageResponse response = configServerApi.patch(
"/nodes/v2/node/" + hostName,
nodeRepositoryNodeFromNodeAttributes(nodeAttributes),
NodeMessageResponse.class);
if (!Strings.isNullOrEmpty(response.errorCode)) {
throw new RuntimeException("Unexpected message " + response.message + " " + response.errorCode);
}
}
/**
 * Moves the node to the given state in the node repository.
 *
 * @throws RuntimeException if the config server reports an error code
 */
@Override
public void setNodeState(String hostName, Node.State nodeState) {
    NodeMessageResponse response = configServerApi.put(
            "/nodes/v2/state/" + nodeState.name() + "/" + hostName,
            Optional.empty(), /* body */
            NodeMessageResponse.class);
    NODE_ADMIN_LOGGER.info(response.message);
    // Use the same error-detection idiom as updateNodeAttributes/addNodes: the previous
    // hand-rolled "errorCode == null || errorCode.isEmpty()" check was inconsistent with them.
    if (!Strings.isNullOrEmpty(response.errorCode)) {
        throw new RuntimeException("Unexpected message " + response.message + " " + response.errorCode);
    }
}
/**
 * Converts the raw node-repository wire format ({@link NodeRepositoryNode}) into the internal
 * {@link NodeSpec} model, validating that fields mandatory for the node's state are present.
 *
 * @throws NullPointerException     if a mandatory field is missing
 * @throws IllegalArgumentException if the type/state strings do not name known enum constants
 */
private static NodeSpec createNodeSpec(NodeRepositoryNode node)
        throws IllegalArgumentException, NullPointerException {
    Objects.requireNonNull(node.type, "Unknown node type");
    NodeType nodeType = NodeType.valueOf(node.type);
    Objects.requireNonNull(node.state, "Unknown node state");
    Node.State nodeState = Node.State.valueOf(node.state);
    // Active nodes must carry a complete deployment description; other states may omit these.
    if (nodeState == Node.State.active) {
        Objects.requireNonNull(node.wantedVespaVersion, "Unknown vespa version for active node");
        Objects.requireNonNull(node.wantedDockerImage, "Unknown docker image for active node");
        Objects.requireNonNull(node.restartGeneration, "Unknown restartGeneration for active node");
        Objects.requireNonNull(node.currentRestartGeneration, "Unknown currentRestartGeneration for active node");
    }
    String hostName = Objects.requireNonNull(node.hostname, "hostname is null");
    // Owner and membership are optional in the wire format; map them only when present.
    NodeSpec.Owner owner = null;
    if (node.owner != null) {
        owner = new NodeSpec.Owner(node.owner.tenant, node.owner.application, node.owner.instance);
    }
    NodeSpec.Membership membership = null;
    if (node.membership != null) {
        membership = new NodeSpec.Membership(node.membership.clusterType, node.membership.clusterId,
                node.membership.group, node.membership.index, node.membership.retired);
    }
    // NOTE(review): NodeSpec takes many positional arguments — keep this order in sync with
    // the NodeSpec constructor declaration when fields are added or removed.
    return new NodeSpec(
            hostName,
            Optional.ofNullable(node.wantedDockerImage).map(DockerImage::new),
            Optional.ofNullable(node.currentDockerImage).map(DockerImage::new),
            nodeState,
            nodeType,
            node.flavor,
            node.canonicalFlavor,
            Optional.ofNullable(node.wantedVespaVersion),
            Optional.ofNullable(node.vespaVersion),
            Optional.ofNullable(node.allowedToBeDown),
            Optional.ofNullable(owner),
            Optional.ofNullable(membership),
            Optional.ofNullable(node.restartGeneration),
            Optional.ofNullable(node.currentRestartGeneration),
            node.rebootGeneration,
            node.currentRebootGeneration,
            node.minCpuCores,
            node.minMainMemoryAvailableGb,
            node.minDiskAvailableGb,
            node.fastDisk,
            node.ipAddresses,
            Optional.ofNullable(node.hardwareDivergence),
            Optional.ofNullable(node.parentHostname));
}
/** Builds the sparse patch object sent to the node repository from locally observed node attributes. */
private static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) {
    NodeRepositoryNode patch = new NodeRepositoryNode();
    DockerImage dockerImage = nodeAttributes.getDockerImage();
    // Unset attributes stay null in the patch so the server leaves them untouched.
    patch.currentDockerImage = (dockerImage == null) ? null : dockerImage.asString();
    patch.currentRestartGeneration = nodeAttributes.getRestartGeneration();
    patch.currentRebootGeneration = nodeAttributes.getRebootGeneration();
    patch.hardwareDivergence = nodeAttributes.getHardwareDivergence();
    return patch;
}
} |
Apparently we don't need this and will instead verify only the Athenz domain and service name. | private static NodeRepositoryNode nodeRepositoryNodeFromAddNode(AddNode addNode) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = "fake-" + addNode.hostname;
node.hostname = addNode.hostname;
node.parentHostname = addNode.parentHostname.orElse(null);
node.flavor = addNode.nodeFlavor;
node.type = addNode.nodeType.name();
node.ipAddresses = addNode.ipAddresses;
node.additionalIpAddresses = addNode.additionalIpAddresses;
return node;
} | node.openStackId = "fake-" + addNode.hostname; | private static NodeRepositoryNode nodeRepositoryNodeFromAddNode(AddNode addNode) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.openStackId = "fake-" + addNode.hostname;
node.hostname = addNode.hostname;
node.parentHostname = addNode.parentHostname.orElse(null);
node.flavor = addNode.nodeFlavor;
node.type = addNode.nodeType.name();
node.ipAddresses = addNode.ipAddresses;
node.additionalIpAddresses = addNode.additionalIpAddresses;
return node;
} | class RealNodeRepository implements NodeRepository {
private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(RealNodeRepository.class);
private final ConfigServerApi configServerApi;
public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
}
/**
 * Registers the given nodes with the node repository in a single POST.
 *
 * @throws RuntimeException if the config server reports an error code
 */
@Override
public void addNodes(List<AddNode> nodes) {
    List<NodeRepositoryNode> postBodies = nodes.stream()
            .map(node -> nodeRepositoryNodeFromAddNode(node))
            .collect(Collectors.toList());
    NodeMessageResponse response = configServerApi.post("/nodes/v2/node", postBodies, NodeMessageResponse.class);
    if (Strings.isNullOrEmpty(response.errorCode)) return;
    throw new RuntimeException("Failed to add nodes to node-repo: " + response.message + " " + response.errorCode);
}
/** Returns the specs of all nodes whose parent host is {@code baseHostName}. */
@Override
public List<NodeSpec> getNodes(String baseHostName) {
    GetNodesResponse response =
            configServerApi.get("/nodes/v2/node/?recursive=true&parentHost=" + baseHostName, GetNodesResponse.class);
    return response.nodes.stream()
            .map(node -> createNodeSpec(node))
            .collect(Collectors.toList());
}
@Override
public Optional<NodeSpec> getNode(String hostName) {
try {
NodeRepositoryNode nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName,
NodeRepositoryNode.class);
if (nodeResponse == null) {
return Optional.empty();
}
return Optional.of(createNodeSpec(nodeResponse));
} catch (HttpException.NotFoundException | HttpException.ForbiddenException e) {
return Optional.empty();
}
}
/**
* Get all ACLs that belongs to a hostname. Usually this is a parent host and all
* ACLs for child nodes are returned.
*/
@Override
public Map<String, Acl> getAcls(String hostName) {
try {
final String path = String.format("/nodes/v2/acl/%s?children=true", hostName);
final GetAclResponse response = configServerApi.get(path, GetAclResponse.class);
Map<String, List<Integer>> trustedPorts = response.trustedPorts.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Port::getTrustedBy,
Collectors.mapping(port -> port.port, Collectors.toList())));
Map<String, List<InetAddress>> trustedNodes = response.trustedNodes.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Node::getTrustedBy,
Collectors.mapping(node -> InetAddresses.forString(node.ipAddress), Collectors.toList())));
return Stream.of(trustedNodes.keySet(), trustedPorts.keySet())
.flatMap(Set::stream)
.distinct()
.collect(Collectors.toMap(
Function.identity(),
hostname -> new Acl(trustedPorts.get(hostname), trustedNodes.get(hostname))));
} catch (HttpException.NotFoundException e) {
NODE_ADMIN_LOGGER.warning("Failed to fetch ACLs for " + hostName + " No ACL will be applied");
}
return Collections.emptyMap();
}
@Override
public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) {
NodeMessageResponse response = configServerApi.patch(
"/nodes/v2/node/" + hostName,
nodeRepositoryNodeFromNodeAttributes(nodeAttributes),
NodeMessageResponse.class);
if (!Strings.isNullOrEmpty(response.errorCode)) {
throw new RuntimeException("Unexpected message " + response.message + " " + response.errorCode);
}
}
@Override
public void setNodeState(String hostName, Node.State nodeState) {
String state = nodeState.name();
NodeMessageResponse response = configServerApi.put(
"/nodes/v2/state/" + state + "/" + hostName,
Optional.empty(), /* body */
NodeMessageResponse.class);
NODE_ADMIN_LOGGER.info(response.message);
if (response.errorCode == null || response.errorCode.isEmpty()) {
return;
}
throw new RuntimeException("Unexpected message " + response.message + " " + response.errorCode);
}
private static NodeSpec createNodeSpec(NodeRepositoryNode node)
throws IllegalArgumentException, NullPointerException {
Objects.requireNonNull(node.type, "Unknown node type");
NodeType nodeType = NodeType.valueOf(node.type);
Objects.requireNonNull(node.state, "Unknown node state");
Node.State nodeState = Node.State.valueOf(node.state);
if (nodeState == Node.State.active) {
Objects.requireNonNull(node.wantedVespaVersion, "Unknown vespa version for active node");
Objects.requireNonNull(node.wantedDockerImage, "Unknown docker image for active node");
Objects.requireNonNull(node.restartGeneration, "Unknown restartGeneration for active node");
Objects.requireNonNull(node.currentRestartGeneration, "Unknown currentRestartGeneration for active node");
}
String hostName = Objects.requireNonNull(node.hostname, "hostname is null");
NodeSpec.Owner owner = null;
if (node.owner != null) {
owner = new NodeSpec.Owner(node.owner.tenant, node.owner.application, node.owner.instance);
}
NodeSpec.Membership membership = null;
if (node.membership != null) {
membership = new NodeSpec.Membership(node.membership.clusterType, node.membership.clusterId,
node.membership.group, node.membership.index, node.membership.retired);
}
return new NodeSpec(
hostName,
Optional.ofNullable(node.wantedDockerImage).map(DockerImage::new),
Optional.ofNullable(node.currentDockerImage).map(DockerImage::new),
nodeState,
nodeType,
node.flavor,
node.canonicalFlavor,
Optional.ofNullable(node.wantedVespaVersion),
Optional.ofNullable(node.vespaVersion),
Optional.ofNullable(node.allowedToBeDown),
Optional.ofNullable(owner),
Optional.ofNullable(membership),
Optional.ofNullable(node.restartGeneration),
Optional.ofNullable(node.currentRestartGeneration),
node.rebootGeneration,
node.currentRebootGeneration,
node.minCpuCores,
node.minMainMemoryAvailableGb,
node.minDiskAvailableGb,
node.fastDisk,
node.ipAddresses,
Optional.ofNullable(node.hardwareDivergence),
Optional.ofNullable(node.parentHostname));
}
private static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.currentDockerImage = Optional.ofNullable(nodeAttributes.getDockerImage()).map(DockerImage::asString).orElse(null);
node.currentRestartGeneration = nodeAttributes.getRestartGeneration();
node.currentRebootGeneration = nodeAttributes.getRebootGeneration();
node.hardwareDivergence = nodeAttributes.getHardwareDivergence();
return node;
}
} | class RealNodeRepository implements NodeRepository {
private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(RealNodeRepository.class);
private final ConfigServerApi configServerApi;
public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
}
@Override
public void addNodes(List<AddNode> nodes) {
List<NodeRepositoryNode> nodesToPost = nodes.stream()
.map(RealNodeRepository::nodeRepositoryNodeFromAddNode)
.collect(Collectors.toList());
NodeMessageResponse response = configServerApi.post("/nodes/v2/node", nodesToPost, NodeMessageResponse.class);
if (!Strings.isNullOrEmpty(response.errorCode)) {
throw new RuntimeException("Failed to add nodes to node-repo: " + response.message + " " + response.errorCode);
}
}
@Override
public List<NodeSpec> getNodes(String baseHostName) {
String path = "/nodes/v2/node/?recursive=true&parentHost=" + baseHostName;
final GetNodesResponse nodesForHost = configServerApi.get(path, GetNodesResponse.class);
return nodesForHost.nodes.stream()
.map(RealNodeRepository::createNodeSpec)
.collect(Collectors.toList());
}
@Override
public Optional<NodeSpec> getNode(String hostName) {
try {
NodeRepositoryNode nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName,
NodeRepositoryNode.class);
if (nodeResponse == null) {
return Optional.empty();
}
return Optional.of(createNodeSpec(nodeResponse));
} catch (HttpException.NotFoundException | HttpException.ForbiddenException e) {
return Optional.empty();
}
}
/**
* Get all ACLs that belongs to a hostname. Usually this is a parent host and all
* ACLs for child nodes are returned.
*/
@Override
public Map<String, Acl> getAcls(String hostName) {
try {
final String path = String.format("/nodes/v2/acl/%s?children=true", hostName);
final GetAclResponse response = configServerApi.get(path, GetAclResponse.class);
Map<String, List<Integer>> trustedPorts = response.trustedPorts.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Port::getTrustedBy,
Collectors.mapping(port -> port.port, Collectors.toList())));
Map<String, List<InetAddress>> trustedNodes = response.trustedNodes.stream()
.collect(Collectors.groupingBy(
GetAclResponse.Node::getTrustedBy,
Collectors.mapping(node -> InetAddresses.forString(node.ipAddress), Collectors.toList())));
return Stream.of(trustedNodes.keySet(), trustedPorts.keySet())
.flatMap(Set::stream)
.distinct()
.collect(Collectors.toMap(
Function.identity(),
hostname -> new Acl(trustedPorts.get(hostname), trustedNodes.get(hostname))));
} catch (HttpException.NotFoundException e) {
NODE_ADMIN_LOGGER.warning("Failed to fetch ACLs for " + hostName + " No ACL will be applied");
}
return Collections.emptyMap();
}
@Override
public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) {
NodeMessageResponse response = configServerApi.patch(
"/nodes/v2/node/" + hostName,
nodeRepositoryNodeFromNodeAttributes(nodeAttributes),
NodeMessageResponse.class);
if (!Strings.isNullOrEmpty(response.errorCode)) {
throw new RuntimeException("Unexpected message " + response.message + " " + response.errorCode);
}
}
@Override
public void setNodeState(String hostName, Node.State nodeState) {
String state = nodeState.name();
NodeMessageResponse response = configServerApi.put(
"/nodes/v2/state/" + state + "/" + hostName,
Optional.empty(), /* body */
NodeMessageResponse.class);
NODE_ADMIN_LOGGER.info(response.message);
if (response.errorCode == null || response.errorCode.isEmpty()) {
return;
}
throw new RuntimeException("Unexpected message " + response.message + " " + response.errorCode);
}
private static NodeSpec createNodeSpec(NodeRepositoryNode node)
throws IllegalArgumentException, NullPointerException {
Objects.requireNonNull(node.type, "Unknown node type");
NodeType nodeType = NodeType.valueOf(node.type);
Objects.requireNonNull(node.state, "Unknown node state");
Node.State nodeState = Node.State.valueOf(node.state);
if (nodeState == Node.State.active) {
Objects.requireNonNull(node.wantedVespaVersion, "Unknown vespa version for active node");
Objects.requireNonNull(node.wantedDockerImage, "Unknown docker image for active node");
Objects.requireNonNull(node.restartGeneration, "Unknown restartGeneration for active node");
Objects.requireNonNull(node.currentRestartGeneration, "Unknown currentRestartGeneration for active node");
}
String hostName = Objects.requireNonNull(node.hostname, "hostname is null");
NodeSpec.Owner owner = null;
if (node.owner != null) {
owner = new NodeSpec.Owner(node.owner.tenant, node.owner.application, node.owner.instance);
}
NodeSpec.Membership membership = null;
if (node.membership != null) {
membership = new NodeSpec.Membership(node.membership.clusterType, node.membership.clusterId,
node.membership.group, node.membership.index, node.membership.retired);
}
return new NodeSpec(
hostName,
Optional.ofNullable(node.wantedDockerImage).map(DockerImage::new),
Optional.ofNullable(node.currentDockerImage).map(DockerImage::new),
nodeState,
nodeType,
node.flavor,
node.canonicalFlavor,
Optional.ofNullable(node.wantedVespaVersion),
Optional.ofNullable(node.vespaVersion),
Optional.ofNullable(node.allowedToBeDown),
Optional.ofNullable(owner),
Optional.ofNullable(membership),
Optional.ofNullable(node.restartGeneration),
Optional.ofNullable(node.currentRestartGeneration),
node.rebootGeneration,
node.currentRebootGeneration,
node.minCpuCores,
node.minMainMemoryAvailableGb,
node.minDiskAvailableGb,
node.fastDisk,
node.ipAddresses,
Optional.ofNullable(node.hardwareDivergence),
Optional.ofNullable(node.parentHostname));
}
private static NodeRepositoryNode nodeRepositoryNodeFromNodeAttributes(NodeAttributes nodeAttributes) {
NodeRepositoryNode node = new NodeRepositoryNode();
node.currentDockerImage = Optional.ofNullable(nodeAttributes.getDockerImage()).map(DockerImage::asString).orElse(null);
node.currentRestartGeneration = nodeAttributes.getRestartGeneration();
node.currentRebootGeneration = nodeAttributes.getRebootGeneration();
node.hardwareDivergence = nodeAttributes.getHardwareDivergence();
return node;
}
} |
Strange choice of variable name: each element is a host:port pair, so `hostAndPort` would be clearer than `hostPort`. :) | public List<HostName> cluster() {
return Arrays.stream(zooKeeperEnsembleConnectionSpec.split(","))
.filter(hostPort -> !hostPort.isEmpty())
.map(hostPort -> hostPort.contains(":")
? hostPort.substring(0, hostPort.indexOf(":"))
: hostPort)
.map(HostName::from)
.collect(Collectors.toList());
} | .filter(hostPort -> !hostPort.isEmpty()) | public List<HostName> cluster() {
return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(","))
.filter(hostAndPort -> !hostAndPort.isEmpty())
.map(hostAndPort -> hostAndPort.split(":")[0])
.map(HostName::from)
.collect(Collectors.toList());
} | class CuratorDb {
private static final Logger log = Logger.getLogger(CuratorDb.class.getName());
private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
private static final Path root = Path.fromString("/controller/v1");
private static final Path lockRoot = root.append("locks");
private static final Path tenantRoot = root.append("tenants");
private static final Path applicationRoot = root.append("applications");
private static final Path controllerRoot = root.append("controllers");
private final StringSetSerializer stringSetSerializer = new StringSetSerializer();
private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer();
private final VersionSerializer versionSerializer = new VersionSerializer();
private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
private final TenantSerializer tenantSerializer = new TenantSerializer();
private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
private final Curator curator;
private final String zooKeeperEnsembleConnectionSpec;
/**
* All keys, to allow reentrancy.
* This will grow forever, but this should be too slow to be a problem.
*/
private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>();
@Inject
public CuratorDb(Curator curator) {
this(curator, curator.zooKeeperEnsembleConnectionSpec());
}
CuratorDb(Curator curator, String zooKeeperEnsembleConnectionSpec) {
this.curator = curator;
this.zooKeeperEnsembleConnectionSpec = zooKeeperEnsembleConnectionSpec;
}
/** Returns all hosts configured to be part of this ZooKeeper cluster */
public Lock lock(TenantName name, Duration timeout) {
return lock(lockPath(name), timeout);
}
public Lock lock(ApplicationId id, Duration timeout) {
return lock(lockPath(id), timeout);
}
public Lock lockRotations() {
return lock(lockRoot.append("rotations"), defaultLockTimeout);
}
/**
 * Creates (or reuses) a reentrant lock for the given path and blocks until it is acquired.
 *
 * <p>Lock instances are cached per path in {@code locks}, so repeated calls for the same path
 * share one Lock object — that sharing is what makes re-entry possible. The cache grows without
 * bound (see the field comment on {@code locks}).
 * NOTE(review): acquire presumably throws if the timeout expires rather than returning —
 * confirm the Lock.acquire contract.
 */
private Lock lock(Path path, Duration timeout) {
    Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator));
    lock.acquire(timeout);
    return lock;
}
public Lock lockInactiveJobs() {
return lock(lockRoot.append("inactiveJobsLock"), defaultLockTimeout);
}
public Lock lockMaintenanceJob(String jobName) {
return lock(lockRoot.append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockProvisionState(String provisionStateId) {
return lock(lockPath(provisionStateId), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockVespaServerPool() {
return lock(lockRoot.append("vespaServerPoolLock"), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockOpenStackServerPool() {
return lock(lockRoot.append("openStackServerPoolLock"), Duration.ofSeconds(1));
}
/** Reads the data at the given path and maps it, treating missing or zero-length data as absent. */
private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
    Optional<byte[]> data = curator.getData(path);
    return data.filter(bytes -> bytes.length > 0).map(mapper);
}
/** Reads the JSON stored at the given path as a Slime structure, or empty if nothing is stored. */
private Optional<Slime> readSlime(Path path) {
    return read(path, bytes -> SlimeUtils.jsonToSlime(bytes));
}
/**
 * Serializes a Slime structure to its JSON byte representation.
 *
 * @throws UncheckedIOException if serialization fails (not expected for in-memory data)
 */
private static byte[] asJson(Slime slime) {
    try {
        return SlimeUtils.toJsonBytes(slime);
    } catch (IOException e) {
        // In-memory serialization should not throw; surface as unchecked if it somehow does.
        throw new UncheckedIOException(e);
    }
}
/**
 * Reads the set of inactive job names. If the stored data cannot be parsed, the corrupt state
 * is logged, wiped, and an empty (mutable) set is returned.
 */
public Set<String> readInactiveJobs() {
    try {
        return readSlime(inactiveJobsPath()).map(stringSetSerializer::fromSlime).orElseGet(HashSet::new);
    }
    catch (RuntimeException e) {
        // Pass the exception to the log record; previously the cause was silently dropped.
        log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state", e);
        writeInactiveJobs(Collections.emptySet());
        return new HashSet<>();
    }
}
public void writeInactiveJobs(Set<String> inactiveJobs) {
curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs));
}
/** Returns the configured number of upgrades allowed per minute, defaulting to 0.5 when unset. */
public double readUpgradesPerMinute() {
    Optional<ByteBuffer> stored = read(upgradesPerMinutePath(), ByteBuffer::wrap);
    return stored.map(ByteBuffer::getDouble).orElse(0.5);
}
/**
 * Stores the number of upgrades allowed per minute.
 *
 * @throws IllegalArgumentException if {@code n} is negative or NaN
 */
public void writeUpgradesPerMinute(double n) {
    // Also reject NaN: NaN < 0 is false, so the old check let NaN slip through to storage.
    if (Double.isNaN(n) || n < 0) {
        throw new IllegalArgumentException("Upgrades per minute must be >= 0");
    }
    curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
}
public void writeVersionStatus(VersionStatus status) {
curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
}
public VersionStatus readVersionStatus() {
return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
}
public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
}
public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
.orElseGet(Collections::emptyMap);
}
public void writeControllerVersion(HostName hostname, Version version) {
curator.set(controllerPath(hostname.value()), asJson(versionSerializer.toSlime(version)));
}
public Version readControllerVersion(HostName hostname) {
return readSlime(controllerPath(hostname.value()))
.map(versionSerializer::fromSlime)
.orElse(Vtag.currentVersion);
}
public void writeTenant(UserTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<UserTenant> readUserTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::userTenantFrom);
}
public void writeTenant(AthenzTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<AthenzTenant> readAthenzTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::athenzTenantFrom);
}
/** Reads a tenant by name, dispatching on the name prefix to the user- or Athenz-tenant reader. */
public Optional<Tenant> readTenant(TenantName name) {
    boolean isUserTenant = name.value().startsWith(Tenant.userPrefix);
    return isUserTenant
            ? readUserTenant(name).map(Tenant.class::cast)
            : readAthenzTenant(name).map(Tenant.class::cast);
}
/** Reads all tenants, silently skipping entries that can no longer be resolved. Returns an unmodifiable list. */
public List<Tenant> readTenants() {
    List<Tenant> tenants = curator.getChildren(tenantRoot).stream()
            .map(name -> readTenant(TenantName.from(name)))
            .filter(Optional::isPresent)
            .map(Optional::get)
            .collect(Collectors.toList());
    return Collections.unmodifiableList(tenants);
}
public void removeTenant(TenantName name) {
curator.delete(tenantPath(name));
}
public void writeApplication(Application application) {
curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
}
public Optional<Application> readApplication(ApplicationId application) {
return readSlime(applicationPath(application)).map(applicationSerializer::fromSlime);
}
public List<Application> readApplications() {
return readApplications(ignored -> true);
}
public List<Application> readApplications(TenantName name) {
return readApplications(application -> application.tenant().equals(name));
}
private List<Application> readApplications(Predicate<ApplicationId> applicationFilter) {
return curator.getChildren(applicationRoot).stream()
.map(ApplicationId::fromSerializedForm)
.filter(applicationFilter)
.map(this::readApplication)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeApplication(ApplicationId application) {
curator.delete(applicationPath(application));
}
@SuppressWarnings("unused")
public Optional<byte[]> readProvisionState(String provisionId) {
return curator.getData(provisionStatePath(provisionId));
}
@SuppressWarnings("unused")
public void writeProvisionState(String provisionId, byte[] data) {
curator.set(provisionStatePath(provisionId), data);
}
@SuppressWarnings("unused")
public List<String> readProvisionStateIds() {
return curator.getChildren(provisionStatePath());
}
@SuppressWarnings("unused")
public Optional<byte[]> readVespaServerPool() {
return curator.getData(vespaServerPoolPath());
}
@SuppressWarnings("unused")
public void writeVespaServerPool(byte[] data) {
curator.set(vespaServerPoolPath(), data);
}
@SuppressWarnings("unused")
public Optional<byte[]> readOpenStackServerPool() {
return curator.getData(openStackServerPoolPath());
}
@SuppressWarnings("unused")
public void writeOpenStackServerPool(byte[] data) {
curator.set(openStackServerPoolPath(), data);
}
private Path lockPath(TenantName tenant) {
Path lockPath = lockRoot
.append(tenant.value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(ApplicationId application) {
Path lockPath = lockRoot
.append(application.tenant().value())
.append(application.application().value())
.append(application.instance().value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(String provisionId) {
Path lockPath = lockRoot
.append(provisionStatePath())
.append(provisionId);
curator.create(lockPath);
return lockPath;
}
private static Path inactiveJobsPath() {
return root.append("inactiveJobs");
}
private static Path upgradesPerMinutePath() {
return root.append("upgrader").append("upgradesPerMinute");
}
private static Path confidenceOverridesPath() {
return root.append("upgrader").append("confidenceOverrides");
}
private static Path versionStatusPath() {
return root.append("versionStatus");
}
private static Path provisionStatePath() {
return root.append("provisioning").append("states");
}
private static Path provisionStatePath(String provisionId) {
return provisionStatePath().append(provisionId);
}
private static Path vespaServerPoolPath() {
return root.append("vespaServerPool");
}
private static Path openStackServerPoolPath() {
return root.append("openStackServerPool");
}
private static Path tenantPath(TenantName name) {
return tenantRoot.append(name.value());
}
private static Path applicationPath(ApplicationId application) {
return applicationRoot.append(application.serializedForm());
}
private static Path controllerPath(String hostname) {
return controllerRoot.append(hostname);
}
} | class CuratorDb {
private static final Logger log = Logger.getLogger(CuratorDb.class.getName());
private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
private static final Path root = Path.fromString("/controller/v1");
private static final Path lockRoot = root.append("locks");
private static final Path tenantRoot = root.append("tenants");
private static final Path applicationRoot = root.append("applications");
private static final Path controllerRoot = root.append("controllers");
private final StringSetSerializer stringSetSerializer = new StringSetSerializer();
private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer();
private final VersionSerializer versionSerializer = new VersionSerializer();
private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
private final TenantSerializer tenantSerializer = new TenantSerializer();
private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
private final Curator curator;
/**
* All keys, to allow reentrancy.
* This will grow forever, but this should be too slow to be a problem.
*/
private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>();
@Inject
public CuratorDb(Curator curator) {
this.curator = curator;
}
/** Returns all hosts configured to be part of this ZooKeeper cluster */
public Lock lock(TenantName name, Duration timeout) {
return lock(lockPath(name), timeout);
}
public Lock lock(ApplicationId id, Duration timeout) {
return lock(lockPath(id), timeout);
}
public Lock lockRotations() {
return lock(lockRoot.append("rotations"), defaultLockTimeout);
}
/** Create a reentrant lock */
private Lock lock(Path path, Duration timeout) {
Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator));
lock.acquire(timeout);
return lock;
}
public Lock lockInactiveJobs() {
return lock(lockRoot.append("inactiveJobsLock"), defaultLockTimeout);
}
public Lock lockMaintenanceJob(String jobName) {
return lock(lockRoot.append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockProvisionState(String provisionStateId) {
return lock(lockPath(provisionStateId), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockVespaServerPool() {
return lock(lockRoot.append("vespaServerPoolLock"), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockOpenStackServerPool() {
return lock(lockRoot.append("openStackServerPoolLock"), Duration.ofSeconds(1));
}
private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
return curator.getData(path).filter(data -> data.length > 0).map(mapper);
}
private Optional<Slime> readSlime(Path path) {
return read(path, SlimeUtils::jsonToSlime);
}
private static byte[] asJson(Slime slime) {
try {
return SlimeUtils.toJsonBytes(slime);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public Set<String> readInactiveJobs() {
try {
return readSlime(inactiveJobsPath()).map(stringSetSerializer::fromSlime).orElseGet(HashSet::new);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state");
writeInactiveJobs(Collections.emptySet());
return new HashSet<>();
}
}
public void writeInactiveJobs(Set<String> inactiveJobs) {
curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs));
}
public double readUpgradesPerMinute() {
return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.5);
}
public void writeUpgradesPerMinute(double n) {
if (n < 0) {
throw new IllegalArgumentException("Upgrades per minute must be >= 0");
}
curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
}
public void writeVersionStatus(VersionStatus status) {
curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
}
public VersionStatus readVersionStatus() {
return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
}
public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
}
public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
.orElseGet(Collections::emptyMap);
}
public void writeControllerVersion(HostName hostname, Version version) {
curator.set(controllerPath(hostname.value()), asJson(versionSerializer.toSlime(version)));
}
public Version readControllerVersion(HostName hostname) {
return readSlime(controllerPath(hostname.value()))
.map(versionSerializer::fromSlime)
.orElse(Vtag.currentVersion);
}
public void writeTenant(UserTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<UserTenant> readUserTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::userTenantFrom);
}
public void writeTenant(AthenzTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<AthenzTenant> readAthenzTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::athenzTenantFrom);
}
public Optional<Tenant> readTenant(TenantName name) {
if (name.value().startsWith(Tenant.userPrefix)) {
return readUserTenant(name).map(Tenant.class::cast);
}
return readAthenzTenant(name).map(Tenant.class::cast);
}
public List<Tenant> readTenants() {
return curator.getChildren(tenantRoot).stream()
.map(TenantName::from)
.map(this::readTenant)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeTenant(TenantName name) {
curator.delete(tenantPath(name));
}
public void writeApplication(Application application) {
curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
}
public Optional<Application> readApplication(ApplicationId application) {
return readSlime(applicationPath(application)).map(applicationSerializer::fromSlime);
}
public List<Application> readApplications() {
return readApplications(ignored -> true);
}
public List<Application> readApplications(TenantName name) {
return readApplications(application -> application.tenant().equals(name));
}
private List<Application> readApplications(Predicate<ApplicationId> applicationFilter) {
return curator.getChildren(applicationRoot).stream()
.map(ApplicationId::fromSerializedForm)
.filter(applicationFilter)
.map(this::readApplication)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeApplication(ApplicationId application) {
curator.delete(applicationPath(application));
}
@SuppressWarnings("unused")
public Optional<byte[]> readProvisionState(String provisionId) {
return curator.getData(provisionStatePath(provisionId));
}
@SuppressWarnings("unused")
public void writeProvisionState(String provisionId, byte[] data) {
curator.set(provisionStatePath(provisionId), data);
}
@SuppressWarnings("unused")
public List<String> readProvisionStateIds() {
return curator.getChildren(provisionStatePath());
}
@SuppressWarnings("unused")
public Optional<byte[]> readVespaServerPool() {
return curator.getData(vespaServerPoolPath());
}
@SuppressWarnings("unused")
public void writeVespaServerPool(byte[] data) {
curator.set(vespaServerPoolPath(), data);
}
@SuppressWarnings("unused")
public Optional<byte[]> readOpenStackServerPool() {
return curator.getData(openStackServerPoolPath());
}
@SuppressWarnings("unused")
public void writeOpenStackServerPool(byte[] data) {
curator.set(openStackServerPoolPath(), data);
}
private Path lockPath(TenantName tenant) {
Path lockPath = lockRoot
.append(tenant.value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(ApplicationId application) {
Path lockPath = lockRoot
.append(application.tenant().value())
.append(application.application().value())
.append(application.instance().value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(String provisionId) {
Path lockPath = lockRoot
.append(provisionStatePath())
.append(provisionId);
curator.create(lockPath);
return lockPath;
}
private static Path inactiveJobsPath() {
return root.append("inactiveJobs");
}
private static Path upgradesPerMinutePath() {
return root.append("upgrader").append("upgradesPerMinute");
}
private static Path confidenceOverridesPath() {
return root.append("upgrader").append("confidenceOverrides");
}
private static Path versionStatusPath() {
return root.append("versionStatus");
}
private static Path provisionStatePath() {
return root.append("provisioning").append("states");
}
private static Path provisionStatePath(String provisionId) {
return provisionStatePath().append(provisionId);
}
private static Path vespaServerPoolPath() {
return root.append("vespaServerPool");
}
private static Path openStackServerPoolPath() {
return root.append("openStackServerPool");
}
private static Path tenantPath(TenantName name) {
return tenantRoot.append(name.value());
}
private static Path applicationPath(ApplicationId application) {
return applicationRoot.append(application.serializedForm());
}
private static Path controllerPath(String hostname) {
return controllerRoot.append(hostname);
}
} |
I suppose it means _host and port_ -- how about camel-casing that, or just using _host_? I read this as _port of the host_, and was confused. | public List<HostName> cluster() {
return Arrays.stream(zooKeeperEnsembleConnectionSpec.split(","))
.filter(hostPort -> !hostPort.isEmpty())
.map(hostPort -> hostPort.contains(":")
? hostPort.substring(0, hostPort.indexOf(":"))
: hostPort)
.map(HostName::from)
.collect(Collectors.toList());
} | .filter(hostPort -> !hostPort.isEmpty()) | public List<HostName> cluster() {
return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(","))
.filter(hostAndPort -> !hostAndPort.isEmpty())
.map(hostAndPort -> hostAndPort.split(":")[0])
.map(HostName::from)
.collect(Collectors.toList());
} | class CuratorDb {
private static final Logger log = Logger.getLogger(CuratorDb.class.getName());
private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
private static final Path root = Path.fromString("/controller/v1");
private static final Path lockRoot = root.append("locks");
private static final Path tenantRoot = root.append("tenants");
private static final Path applicationRoot = root.append("applications");
private static final Path controllerRoot = root.append("controllers");
private final StringSetSerializer stringSetSerializer = new StringSetSerializer();
private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer();
private final VersionSerializer versionSerializer = new VersionSerializer();
private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
private final TenantSerializer tenantSerializer = new TenantSerializer();
private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
private final Curator curator;
private final String zooKeeperEnsembleConnectionSpec;
/**
* All keys, to allow reentrancy.
* This will grow forever, but this should be too slow to be a problem.
*/
private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>();
@Inject
public CuratorDb(Curator curator) {
this(curator, curator.zooKeeperEnsembleConnectionSpec());
}
CuratorDb(Curator curator, String zooKeeperEnsembleConnectionSpec) {
this.curator = curator;
this.zooKeeperEnsembleConnectionSpec = zooKeeperEnsembleConnectionSpec;
}
/** Returns all hosts configured to be part of this ZooKeeper cluster */
public Lock lock(TenantName name, Duration timeout) {
return lock(lockPath(name), timeout);
}
public Lock lock(ApplicationId id, Duration timeout) {
return lock(lockPath(id), timeout);
}
public Lock lockRotations() {
return lock(lockRoot.append("rotations"), defaultLockTimeout);
}
/** Create a reentrant lock */
private Lock lock(Path path, Duration timeout) {
Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator));
lock.acquire(timeout);
return lock;
}
public Lock lockInactiveJobs() {
return lock(lockRoot.append("inactiveJobsLock"), defaultLockTimeout);
}
public Lock lockMaintenanceJob(String jobName) {
return lock(lockRoot.append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockProvisionState(String provisionStateId) {
return lock(lockPath(provisionStateId), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockVespaServerPool() {
return lock(lockRoot.append("vespaServerPoolLock"), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockOpenStackServerPool() {
return lock(lockRoot.append("openStackServerPoolLock"), Duration.ofSeconds(1));
}
private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
return curator.getData(path).filter(data -> data.length > 0).map(mapper);
}
private Optional<Slime> readSlime(Path path) {
return read(path, SlimeUtils::jsonToSlime);
}
private static byte[] asJson(Slime slime) {
try {
return SlimeUtils.toJsonBytes(slime);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public Set<String> readInactiveJobs() {
try {
return readSlime(inactiveJobsPath()).map(stringSetSerializer::fromSlime).orElseGet(HashSet::new);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state");
writeInactiveJobs(Collections.emptySet());
return new HashSet<>();
}
}
public void writeInactiveJobs(Set<String> inactiveJobs) {
curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs));
}
public double readUpgradesPerMinute() {
return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.5);
}
public void writeUpgradesPerMinute(double n) {
if (n < 0) {
throw new IllegalArgumentException("Upgrades per minute must be >= 0");
}
curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
}
public void writeVersionStatus(VersionStatus status) {
curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
}
public VersionStatus readVersionStatus() {
return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
}
public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
}
public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
.orElseGet(Collections::emptyMap);
}
public void writeControllerVersion(HostName hostname, Version version) {
curator.set(controllerPath(hostname.value()), asJson(versionSerializer.toSlime(version)));
}
public Version readControllerVersion(HostName hostname) {
return readSlime(controllerPath(hostname.value()))
.map(versionSerializer::fromSlime)
.orElse(Vtag.currentVersion);
}
public void writeTenant(UserTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<UserTenant> readUserTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::userTenantFrom);
}
public void writeTenant(AthenzTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<AthenzTenant> readAthenzTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::athenzTenantFrom);
}
public Optional<Tenant> readTenant(TenantName name) {
if (name.value().startsWith(Tenant.userPrefix)) {
return readUserTenant(name).map(Tenant.class::cast);
}
return readAthenzTenant(name).map(Tenant.class::cast);
}
public List<Tenant> readTenants() {
return curator.getChildren(tenantRoot).stream()
.map(TenantName::from)
.map(this::readTenant)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeTenant(TenantName name) {
curator.delete(tenantPath(name));
}
public void writeApplication(Application application) {
curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
}
public Optional<Application> readApplication(ApplicationId application) {
return readSlime(applicationPath(application)).map(applicationSerializer::fromSlime);
}
public List<Application> readApplications() {
return readApplications(ignored -> true);
}
public List<Application> readApplications(TenantName name) {
return readApplications(application -> application.tenant().equals(name));
}
private List<Application> readApplications(Predicate<ApplicationId> applicationFilter) {
return curator.getChildren(applicationRoot).stream()
.map(ApplicationId::fromSerializedForm)
.filter(applicationFilter)
.map(this::readApplication)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeApplication(ApplicationId application) {
curator.delete(applicationPath(application));
}
@SuppressWarnings("unused")
public Optional<byte[]> readProvisionState(String provisionId) {
return curator.getData(provisionStatePath(provisionId));
}
@SuppressWarnings("unused")
public void writeProvisionState(String provisionId, byte[] data) {
curator.set(provisionStatePath(provisionId), data);
}
@SuppressWarnings("unused")
public List<String> readProvisionStateIds() {
return curator.getChildren(provisionStatePath());
}
@SuppressWarnings("unused")
public Optional<byte[]> readVespaServerPool() {
return curator.getData(vespaServerPoolPath());
}
@SuppressWarnings("unused")
public void writeVespaServerPool(byte[] data) {
curator.set(vespaServerPoolPath(), data);
}
@SuppressWarnings("unused")
public Optional<byte[]> readOpenStackServerPool() {
return curator.getData(openStackServerPoolPath());
}
@SuppressWarnings("unused")
public void writeOpenStackServerPool(byte[] data) {
curator.set(openStackServerPoolPath(), data);
}
private Path lockPath(TenantName tenant) {
Path lockPath = lockRoot
.append(tenant.value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(ApplicationId application) {
Path lockPath = lockRoot
.append(application.tenant().value())
.append(application.application().value())
.append(application.instance().value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(String provisionId) {
Path lockPath = lockRoot
.append(provisionStatePath())
.append(provisionId);
curator.create(lockPath);
return lockPath;
}
private static Path inactiveJobsPath() {
return root.append("inactiveJobs");
}
private static Path upgradesPerMinutePath() {
return root.append("upgrader").append("upgradesPerMinute");
}
private static Path confidenceOverridesPath() {
return root.append("upgrader").append("confidenceOverrides");
}
private static Path versionStatusPath() {
return root.append("versionStatus");
}
private static Path provisionStatePath() {
return root.append("provisioning").append("states");
}
private static Path provisionStatePath(String provisionId) {
return provisionStatePath().append(provisionId);
}
private static Path vespaServerPoolPath() {
return root.append("vespaServerPool");
}
private static Path openStackServerPoolPath() {
return root.append("openStackServerPool");
}
private static Path tenantPath(TenantName name) {
return tenantRoot.append(name.value());
}
private static Path applicationPath(ApplicationId application) {
return applicationRoot.append(application.serializedForm());
}
private static Path controllerPath(String hostname) {
return controllerRoot.append(hostname);
}
} | class CuratorDb {
private static final Logger log = Logger.getLogger(CuratorDb.class.getName());
private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
private static final Path root = Path.fromString("/controller/v1");
private static final Path lockRoot = root.append("locks");
private static final Path tenantRoot = root.append("tenants");
private static final Path applicationRoot = root.append("applications");
private static final Path controllerRoot = root.append("controllers");
private final StringSetSerializer stringSetSerializer = new StringSetSerializer();
private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer();
private final VersionSerializer versionSerializer = new VersionSerializer();
private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
private final TenantSerializer tenantSerializer = new TenantSerializer();
private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
private final Curator curator;
/**
* All keys, to allow reentrancy.
* This will grow forever, but this should be too slow to be a problem.
*/
private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>();
@Inject
public CuratorDb(Curator curator) {
this.curator = curator;
}
/** Returns all hosts configured to be part of this ZooKeeper cluster */
public Lock lock(TenantName name, Duration timeout) {
return lock(lockPath(name), timeout);
}
public Lock lock(ApplicationId id, Duration timeout) {
return lock(lockPath(id), timeout);
}
public Lock lockRotations() {
return lock(lockRoot.append("rotations"), defaultLockTimeout);
}
/** Create a reentrant lock */
private Lock lock(Path path, Duration timeout) {
Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator));
lock.acquire(timeout);
return lock;
}
public Lock lockInactiveJobs() {
return lock(lockRoot.append("inactiveJobsLock"), defaultLockTimeout);
}
public Lock lockMaintenanceJob(String jobName) {
return lock(lockRoot.append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockProvisionState(String provisionStateId) {
return lock(lockPath(provisionStateId), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockVespaServerPool() {
return lock(lockRoot.append("vespaServerPoolLock"), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockOpenStackServerPool() {
return lock(lockRoot.append("openStackServerPoolLock"), Duration.ofSeconds(1));
}
private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
return curator.getData(path).filter(data -> data.length > 0).map(mapper);
}
private Optional<Slime> readSlime(Path path) {
return read(path, SlimeUtils::jsonToSlime);
}
private static byte[] asJson(Slime slime) {
try {
return SlimeUtils.toJsonBytes(slime);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public Set<String> readInactiveJobs() {
try {
return readSlime(inactiveJobsPath()).map(stringSetSerializer::fromSlime).orElseGet(HashSet::new);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state");
writeInactiveJobs(Collections.emptySet());
return new HashSet<>();
}
}
public void writeInactiveJobs(Set<String> inactiveJobs) {
curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs));
}
public double readUpgradesPerMinute() {
return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.5);
}
public void writeUpgradesPerMinute(double n) {
if (n < 0) {
throw new IllegalArgumentException("Upgrades per minute must be >= 0");
}
curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
}
public void writeVersionStatus(VersionStatus status) {
curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
}
public VersionStatus readVersionStatus() {
return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
}
public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
}
public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
.orElseGet(Collections::emptyMap);
}
public void writeControllerVersion(HostName hostname, Version version) {
curator.set(controllerPath(hostname.value()), asJson(versionSerializer.toSlime(version)));
}
public Version readControllerVersion(HostName hostname) {
return readSlime(controllerPath(hostname.value()))
.map(versionSerializer::fromSlime)
.orElse(Vtag.currentVersion);
}
public void writeTenant(UserTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<UserTenant> readUserTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::userTenantFrom);
}
public void writeTenant(AthenzTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<AthenzTenant> readAthenzTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::athenzTenantFrom);
}
public Optional<Tenant> readTenant(TenantName name) {
if (name.value().startsWith(Tenant.userPrefix)) {
return readUserTenant(name).map(Tenant.class::cast);
}
return readAthenzTenant(name).map(Tenant.class::cast);
}
public List<Tenant> readTenants() {
return curator.getChildren(tenantRoot).stream()
.map(TenantName::from)
.map(this::readTenant)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeTenant(TenantName name) {
curator.delete(tenantPath(name));
}
public void writeApplication(Application application) {
curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
}
public Optional<Application> readApplication(ApplicationId application) {
return readSlime(applicationPath(application)).map(applicationSerializer::fromSlime);
}
public List<Application> readApplications() {
return readApplications(ignored -> true);
}
public List<Application> readApplications(TenantName name) {
return readApplications(application -> application.tenant().equals(name));
}
private List<Application> readApplications(Predicate<ApplicationId> applicationFilter) {
return curator.getChildren(applicationRoot).stream()
.map(ApplicationId::fromSerializedForm)
.filter(applicationFilter)
.map(this::readApplication)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeApplication(ApplicationId application) {
curator.delete(applicationPath(application));
}
@SuppressWarnings("unused")
public Optional<byte[]> readProvisionState(String provisionId) {
return curator.getData(provisionStatePath(provisionId));
}
@SuppressWarnings("unused")
public void writeProvisionState(String provisionId, byte[] data) {
curator.set(provisionStatePath(provisionId), data);
}
@SuppressWarnings("unused")
public List<String> readProvisionStateIds() {
return curator.getChildren(provisionStatePath());
}
@SuppressWarnings("unused")
public Optional<byte[]> readVespaServerPool() {
return curator.getData(vespaServerPoolPath());
}
@SuppressWarnings("unused")
public void writeVespaServerPool(byte[] data) {
curator.set(vespaServerPoolPath(), data);
}
@SuppressWarnings("unused")
public Optional<byte[]> readOpenStackServerPool() {
return curator.getData(openStackServerPoolPath());
}
@SuppressWarnings("unused")
public void writeOpenStackServerPool(byte[] data) {
curator.set(openStackServerPoolPath(), data);
}
private Path lockPath(TenantName tenant) {
Path lockPath = lockRoot
.append(tenant.value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(ApplicationId application) {
Path lockPath = lockRoot
.append(application.tenant().value())
.append(application.application().value())
.append(application.instance().value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(String provisionId) {
Path lockPath = lockRoot
.append(provisionStatePath())
.append(provisionId);
curator.create(lockPath);
return lockPath;
}
private static Path inactiveJobsPath() {
return root.append("inactiveJobs");
}
private static Path upgradesPerMinutePath() {
return root.append("upgrader").append("upgradesPerMinute");
}
private static Path confidenceOverridesPath() {
return root.append("upgrader").append("confidenceOverrides");
}
private static Path versionStatusPath() {
return root.append("versionStatus");
}
private static Path provisionStatePath() {
return root.append("provisioning").append("states");
}
private static Path provisionStatePath(String provisionId) {
return provisionStatePath().append(provisionId);
}
private static Path vespaServerPoolPath() {
return root.append("vespaServerPool");
}
private static Path openStackServerPoolPath() {
return root.append("openStackServerPool");
}
private static Path tenantPath(TenantName name) {
return tenantRoot.append(name.value());
}
private static Path applicationPath(ApplicationId application) {
return applicationRoot.append(application.serializedForm());
}
private static Path controllerPath(String hostname) {
return controllerRoot.append(hostname);
}
} |
I think `.map(host -> host.replaceFirst(":.*", ""))` is clearer, but feel free to disagree. | public List<HostName> cluster() {
return Arrays.stream(zooKeeperEnsembleConnectionSpec.split(","))
.filter(hostPort -> !hostPort.isEmpty())
.map(hostPort -> hostPort.contains(":")
? hostPort.substring(0, hostPort.indexOf(":"))
: hostPort)
.map(HostName::from)
.collect(Collectors.toList());
} | : hostPort) | public List<HostName> cluster() {
return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(","))
.filter(hostAndPort -> !hostAndPort.isEmpty())
.map(hostAndPort -> hostAndPort.split(":")[0])
.map(HostName::from)
.collect(Collectors.toList());
} | class CuratorDb {
private static final Logger log = Logger.getLogger(CuratorDb.class.getName());
private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
private static final Path root = Path.fromString("/controller/v1");
private static final Path lockRoot = root.append("locks");
private static final Path tenantRoot = root.append("tenants");
private static final Path applicationRoot = root.append("applications");
private static final Path controllerRoot = root.append("controllers");
private final StringSetSerializer stringSetSerializer = new StringSetSerializer();
private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer();
private final VersionSerializer versionSerializer = new VersionSerializer();
private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
private final TenantSerializer tenantSerializer = new TenantSerializer();
private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
private final Curator curator;
private final String zooKeeperEnsembleConnectionSpec;
/**
* All keys, to allow reentrancy.
* This will grow forever, but this should be too slow to be a problem.
*/
private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>();
@Inject
public CuratorDb(Curator curator) {
this(curator, curator.zooKeeperEnsembleConnectionSpec());
}
CuratorDb(Curator curator, String zooKeeperEnsembleConnectionSpec) {
this.curator = curator;
this.zooKeeperEnsembleConnectionSpec = zooKeeperEnsembleConnectionSpec;
}
/** Returns all hosts configured to be part of this ZooKeeper cluster */
public Lock lock(TenantName name, Duration timeout) {
return lock(lockPath(name), timeout);
}
public Lock lock(ApplicationId id, Duration timeout) {
return lock(lockPath(id), timeout);
}
public Lock lockRotations() {
return lock(lockRoot.append("rotations"), defaultLockTimeout);
}
/** Create a reentrant lock */
private Lock lock(Path path, Duration timeout) {
Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator));
lock.acquire(timeout);
return lock;
}
public Lock lockInactiveJobs() {
return lock(lockRoot.append("inactiveJobsLock"), defaultLockTimeout);
}
public Lock lockMaintenanceJob(String jobName) {
return lock(lockRoot.append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockProvisionState(String provisionStateId) {
return lock(lockPath(provisionStateId), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockVespaServerPool() {
return lock(lockRoot.append("vespaServerPoolLock"), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockOpenStackServerPool() {
return lock(lockRoot.append("openStackServerPoolLock"), Duration.ofSeconds(1));
}
private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
return curator.getData(path).filter(data -> data.length > 0).map(mapper);
}
private Optional<Slime> readSlime(Path path) {
return read(path, SlimeUtils::jsonToSlime);
}
private static byte[] asJson(Slime slime) {
try {
return SlimeUtils.toJsonBytes(slime);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public Set<String> readInactiveJobs() {
try {
return readSlime(inactiveJobsPath()).map(stringSetSerializer::fromSlime).orElseGet(HashSet::new);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state");
writeInactiveJobs(Collections.emptySet());
return new HashSet<>();
}
}
public void writeInactiveJobs(Set<String> inactiveJobs) {
curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs));
}
public double readUpgradesPerMinute() {
return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.5);
}
public void writeUpgradesPerMinute(double n) {
if (n < 0) {
throw new IllegalArgumentException("Upgrades per minute must be >= 0");
}
curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
}
public void writeVersionStatus(VersionStatus status) {
curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
}
public VersionStatus readVersionStatus() {
return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
}
public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
}
public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
.orElseGet(Collections::emptyMap);
}
public void writeControllerVersion(HostName hostname, Version version) {
curator.set(controllerPath(hostname.value()), asJson(versionSerializer.toSlime(version)));
}
public Version readControllerVersion(HostName hostname) {
return readSlime(controllerPath(hostname.value()))
.map(versionSerializer::fromSlime)
.orElse(Vtag.currentVersion);
}
public void writeTenant(UserTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<UserTenant> readUserTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::userTenantFrom);
}
public void writeTenant(AthenzTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<AthenzTenant> readAthenzTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::athenzTenantFrom);
}
public Optional<Tenant> readTenant(TenantName name) {
if (name.value().startsWith(Tenant.userPrefix)) {
return readUserTenant(name).map(Tenant.class::cast);
}
return readAthenzTenant(name).map(Tenant.class::cast);
}
public List<Tenant> readTenants() {
return curator.getChildren(tenantRoot).stream()
.map(TenantName::from)
.map(this::readTenant)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeTenant(TenantName name) {
curator.delete(tenantPath(name));
}
public void writeApplication(Application application) {
curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
}
public Optional<Application> readApplication(ApplicationId application) {
return readSlime(applicationPath(application)).map(applicationSerializer::fromSlime);
}
public List<Application> readApplications() {
return readApplications(ignored -> true);
}
public List<Application> readApplications(TenantName name) {
return readApplications(application -> application.tenant().equals(name));
}
private List<Application> readApplications(Predicate<ApplicationId> applicationFilter) {
return curator.getChildren(applicationRoot).stream()
.map(ApplicationId::fromSerializedForm)
.filter(applicationFilter)
.map(this::readApplication)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeApplication(ApplicationId application) {
curator.delete(applicationPath(application));
}
@SuppressWarnings("unused")
public Optional<byte[]> readProvisionState(String provisionId) {
return curator.getData(provisionStatePath(provisionId));
}
@SuppressWarnings("unused")
public void writeProvisionState(String provisionId, byte[] data) {
curator.set(provisionStatePath(provisionId), data);
}
@SuppressWarnings("unused")
public List<String> readProvisionStateIds() {
return curator.getChildren(provisionStatePath());
}
@SuppressWarnings("unused")
public Optional<byte[]> readVespaServerPool() {
return curator.getData(vespaServerPoolPath());
}
@SuppressWarnings("unused")
public void writeVespaServerPool(byte[] data) {
curator.set(vespaServerPoolPath(), data);
}
@SuppressWarnings("unused")
public Optional<byte[]> readOpenStackServerPool() {
return curator.getData(openStackServerPoolPath());
}
@SuppressWarnings("unused")
public void writeOpenStackServerPool(byte[] data) {
curator.set(openStackServerPoolPath(), data);
}
private Path lockPath(TenantName tenant) {
Path lockPath = lockRoot
.append(tenant.value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(ApplicationId application) {
Path lockPath = lockRoot
.append(application.tenant().value())
.append(application.application().value())
.append(application.instance().value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(String provisionId) {
Path lockPath = lockRoot
.append(provisionStatePath())
.append(provisionId);
curator.create(lockPath);
return lockPath;
}
private static Path inactiveJobsPath() {
return root.append("inactiveJobs");
}
private static Path upgradesPerMinutePath() {
return root.append("upgrader").append("upgradesPerMinute");
}
private static Path confidenceOverridesPath() {
return root.append("upgrader").append("confidenceOverrides");
}
private static Path versionStatusPath() {
return root.append("versionStatus");
}
private static Path provisionStatePath() {
return root.append("provisioning").append("states");
}
private static Path provisionStatePath(String provisionId) {
return provisionStatePath().append(provisionId);
}
private static Path vespaServerPoolPath() {
return root.append("vespaServerPool");
}
private static Path openStackServerPoolPath() {
return root.append("openStackServerPool");
}
private static Path tenantPath(TenantName name) {
return tenantRoot.append(name.value());
}
private static Path applicationPath(ApplicationId application) {
return applicationRoot.append(application.serializedForm());
}
private static Path controllerPath(String hostname) {
return controllerRoot.append(hostname);
}
} | class CuratorDb {
private static final Logger log = Logger.getLogger(CuratorDb.class.getName());
private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
private static final Path root = Path.fromString("/controller/v1");
private static final Path lockRoot = root.append("locks");
private static final Path tenantRoot = root.append("tenants");
private static final Path applicationRoot = root.append("applications");
private static final Path controllerRoot = root.append("controllers");
private final StringSetSerializer stringSetSerializer = new StringSetSerializer();
private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer();
private final VersionSerializer versionSerializer = new VersionSerializer();
private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
private final TenantSerializer tenantSerializer = new TenantSerializer();
private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
private final Curator curator;
/**
* All keys, to allow reentrancy.
* This will grow forever, but this should be too slow to be a problem.
*/
private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>();
@Inject
public CuratorDb(Curator curator) {
this.curator = curator;
}
/** Returns all hosts configured to be part of this ZooKeeper cluster */
public Lock lock(TenantName name, Duration timeout) {
return lock(lockPath(name), timeout);
}
public Lock lock(ApplicationId id, Duration timeout) {
return lock(lockPath(id), timeout);
}
public Lock lockRotations() {
return lock(lockRoot.append("rotations"), defaultLockTimeout);
}
/** Create a reentrant lock */
private Lock lock(Path path, Duration timeout) {
Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator));
lock.acquire(timeout);
return lock;
}
public Lock lockInactiveJobs() {
return lock(lockRoot.append("inactiveJobsLock"), defaultLockTimeout);
}
public Lock lockMaintenanceJob(String jobName) {
return lock(lockRoot.append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockProvisionState(String provisionStateId) {
return lock(lockPath(provisionStateId), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockVespaServerPool() {
return lock(lockRoot.append("vespaServerPoolLock"), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockOpenStackServerPool() {
return lock(lockRoot.append("openStackServerPoolLock"), Duration.ofSeconds(1));
}
private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
return curator.getData(path).filter(data -> data.length > 0).map(mapper);
}
private Optional<Slime> readSlime(Path path) {
return read(path, SlimeUtils::jsonToSlime);
}
private static byte[] asJson(Slime slime) {
try {
return SlimeUtils.toJsonBytes(slime);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public Set<String> readInactiveJobs() {
try {
return readSlime(inactiveJobsPath()).map(stringSetSerializer::fromSlime).orElseGet(HashSet::new);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state");
writeInactiveJobs(Collections.emptySet());
return new HashSet<>();
}
}
public void writeInactiveJobs(Set<String> inactiveJobs) {
curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs));
}
public double readUpgradesPerMinute() {
return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.5);
}
public void writeUpgradesPerMinute(double n) {
if (n < 0) {
throw new IllegalArgumentException("Upgrades per minute must be >= 0");
}
curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
}
public void writeVersionStatus(VersionStatus status) {
curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
}
public VersionStatus readVersionStatus() {
return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
}
public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
}
public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
.orElseGet(Collections::emptyMap);
}
public void writeControllerVersion(HostName hostname, Version version) {
curator.set(controllerPath(hostname.value()), asJson(versionSerializer.toSlime(version)));
}
public Version readControllerVersion(HostName hostname) {
return readSlime(controllerPath(hostname.value()))
.map(versionSerializer::fromSlime)
.orElse(Vtag.currentVersion);
}
public void writeTenant(UserTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<UserTenant> readUserTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::userTenantFrom);
}
public void writeTenant(AthenzTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<AthenzTenant> readAthenzTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::athenzTenantFrom);
}
public Optional<Tenant> readTenant(TenantName name) {
if (name.value().startsWith(Tenant.userPrefix)) {
return readUserTenant(name).map(Tenant.class::cast);
}
return readAthenzTenant(name).map(Tenant.class::cast);
}
public List<Tenant> readTenants() {
return curator.getChildren(tenantRoot).stream()
.map(TenantName::from)
.map(this::readTenant)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeTenant(TenantName name) {
curator.delete(tenantPath(name));
}
public void writeApplication(Application application) {
curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
}
public Optional<Application> readApplication(ApplicationId application) {
return readSlime(applicationPath(application)).map(applicationSerializer::fromSlime);
}
public List<Application> readApplications() {
return readApplications(ignored -> true);
}
public List<Application> readApplications(TenantName name) {
return readApplications(application -> application.tenant().equals(name));
}
private List<Application> readApplications(Predicate<ApplicationId> applicationFilter) {
return curator.getChildren(applicationRoot).stream()
.map(ApplicationId::fromSerializedForm)
.filter(applicationFilter)
.map(this::readApplication)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeApplication(ApplicationId application) {
curator.delete(applicationPath(application));
}
@SuppressWarnings("unused")
public Optional<byte[]> readProvisionState(String provisionId) {
return curator.getData(provisionStatePath(provisionId));
}
@SuppressWarnings("unused")
public void writeProvisionState(String provisionId, byte[] data) {
curator.set(provisionStatePath(provisionId), data);
}
@SuppressWarnings("unused")
public List<String> readProvisionStateIds() {
return curator.getChildren(provisionStatePath());
}
@SuppressWarnings("unused")
public Optional<byte[]> readVespaServerPool() {
return curator.getData(vespaServerPoolPath());
}
@SuppressWarnings("unused")
public void writeVespaServerPool(byte[] data) {
curator.set(vespaServerPoolPath(), data);
}
@SuppressWarnings("unused")
public Optional<byte[]> readOpenStackServerPool() {
return curator.getData(openStackServerPoolPath());
}
@SuppressWarnings("unused")
public void writeOpenStackServerPool(byte[] data) {
curator.set(openStackServerPoolPath(), data);
}
private Path lockPath(TenantName tenant) {
Path lockPath = lockRoot
.append(tenant.value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(ApplicationId application) {
Path lockPath = lockRoot
.append(application.tenant().value())
.append(application.application().value())
.append(application.instance().value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(String provisionId) {
Path lockPath = lockRoot
.append(provisionStatePath())
.append(provisionId);
curator.create(lockPath);
return lockPath;
}
private static Path inactiveJobsPath() {
return root.append("inactiveJobs");
}
private static Path upgradesPerMinutePath() {
return root.append("upgrader").append("upgradesPerMinute");
}
private static Path confidenceOverridesPath() {
return root.append("upgrader").append("confidenceOverrides");
}
private static Path versionStatusPath() {
return root.append("versionStatus");
}
private static Path provisionStatePath() {
return root.append("provisioning").append("states");
}
private static Path provisionStatePath(String provisionId) {
return provisionStatePath().append(provisionId);
}
private static Path vespaServerPoolPath() {
return root.append("vespaServerPool");
}
private static Path openStackServerPoolPath() {
return root.append("openStackServerPool");
}
private static Path tenantPath(TenantName name) {
return tenantRoot.append(name.value());
}
private static Path applicationPath(ApplicationId application) {
return applicationRoot.append(application.serializedForm());
}
private static Path controllerPath(String hostname) {
return controllerRoot.append(hostname);
}
} |
Fixed. | public List<HostName> cluster() {
return Arrays.stream(zooKeeperEnsembleConnectionSpec.split(","))
.filter(hostPort -> !hostPort.isEmpty())
.map(hostPort -> hostPort.contains(":")
? hostPort.substring(0, hostPort.indexOf(":"))
: hostPort)
.map(HostName::from)
.collect(Collectors.toList());
} | .filter(hostPort -> !hostPort.isEmpty()) | public List<HostName> cluster() {
return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(","))
.filter(hostAndPort -> !hostAndPort.isEmpty())
.map(hostAndPort -> hostAndPort.split(":")[0])
.map(HostName::from)
.collect(Collectors.toList());
} | class CuratorDb {
private static final Logger log = Logger.getLogger(CuratorDb.class.getName());
private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
private static final Path root = Path.fromString("/controller/v1");
private static final Path lockRoot = root.append("locks");
private static final Path tenantRoot = root.append("tenants");
private static final Path applicationRoot = root.append("applications");
private static final Path controllerRoot = root.append("controllers");
private final StringSetSerializer stringSetSerializer = new StringSetSerializer();
private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer();
private final VersionSerializer versionSerializer = new VersionSerializer();
private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
private final TenantSerializer tenantSerializer = new TenantSerializer();
private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
private final Curator curator;
private final String zooKeeperEnsembleConnectionSpec;
/**
* All keys, to allow reentrancy.
* This will grow forever, but this should be too slow to be a problem.
*/
private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>();
@Inject
public CuratorDb(Curator curator) {
this(curator, curator.zooKeeperEnsembleConnectionSpec());
}
CuratorDb(Curator curator, String zooKeeperEnsembleConnectionSpec) {
this.curator = curator;
this.zooKeeperEnsembleConnectionSpec = zooKeeperEnsembleConnectionSpec;
}
/** Returns all hosts configured to be part of this ZooKeeper cluster */
public Lock lock(TenantName name, Duration timeout) {
return lock(lockPath(name), timeout);
}
public Lock lock(ApplicationId id, Duration timeout) {
return lock(lockPath(id), timeout);
}
public Lock lockRotations() {
return lock(lockRoot.append("rotations"), defaultLockTimeout);
}
/** Create a reentrant lock */
private Lock lock(Path path, Duration timeout) {
Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator));
lock.acquire(timeout);
return lock;
}
public Lock lockInactiveJobs() {
return lock(lockRoot.append("inactiveJobsLock"), defaultLockTimeout);
}
public Lock lockMaintenanceJob(String jobName) {
return lock(lockRoot.append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockProvisionState(String provisionStateId) {
return lock(lockPath(provisionStateId), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockVespaServerPool() {
return lock(lockRoot.append("vespaServerPoolLock"), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockOpenStackServerPool() {
return lock(lockRoot.append("openStackServerPoolLock"), Duration.ofSeconds(1));
}
private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
return curator.getData(path).filter(data -> data.length > 0).map(mapper);
}
private Optional<Slime> readSlime(Path path) {
return read(path, SlimeUtils::jsonToSlime);
}
private static byte[] asJson(Slime slime) {
try {
return SlimeUtils.toJsonBytes(slime);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public Set<String> readInactiveJobs() {
try {
return readSlime(inactiveJobsPath()).map(stringSetSerializer::fromSlime).orElseGet(HashSet::new);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state");
writeInactiveJobs(Collections.emptySet());
return new HashSet<>();
}
}
public void writeInactiveJobs(Set<String> inactiveJobs) {
curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs));
}
public double readUpgradesPerMinute() {
return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.5);
}
public void writeUpgradesPerMinute(double n) {
if (n < 0) {
throw new IllegalArgumentException("Upgrades per minute must be >= 0");
}
curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
}
public void writeVersionStatus(VersionStatus status) {
curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
}
public VersionStatus readVersionStatus() {
return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
}
public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
}
public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
.orElseGet(Collections::emptyMap);
}
public void writeControllerVersion(HostName hostname, Version version) {
curator.set(controllerPath(hostname.value()), asJson(versionSerializer.toSlime(version)));
}
public Version readControllerVersion(HostName hostname) {
return readSlime(controllerPath(hostname.value()))
.map(versionSerializer::fromSlime)
.orElse(Vtag.currentVersion);
}
public void writeTenant(UserTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<UserTenant> readUserTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::userTenantFrom);
}
public void writeTenant(AthenzTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<AthenzTenant> readAthenzTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::athenzTenantFrom);
}
public Optional<Tenant> readTenant(TenantName name) {
if (name.value().startsWith(Tenant.userPrefix)) {
return readUserTenant(name).map(Tenant.class::cast);
}
return readAthenzTenant(name).map(Tenant.class::cast);
}
public List<Tenant> readTenants() {
return curator.getChildren(tenantRoot).stream()
.map(TenantName::from)
.map(this::readTenant)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeTenant(TenantName name) {
curator.delete(tenantPath(name));
}
public void writeApplication(Application application) {
curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
}
public Optional<Application> readApplication(ApplicationId application) {
return readSlime(applicationPath(application)).map(applicationSerializer::fromSlime);
}
public List<Application> readApplications() {
return readApplications(ignored -> true);
}
public List<Application> readApplications(TenantName name) {
return readApplications(application -> application.tenant().equals(name));
}
private List<Application> readApplications(Predicate<ApplicationId> applicationFilter) {
return curator.getChildren(applicationRoot).stream()
.map(ApplicationId::fromSerializedForm)
.filter(applicationFilter)
.map(this::readApplication)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeApplication(ApplicationId application) {
curator.delete(applicationPath(application));
}
@SuppressWarnings("unused")
public Optional<byte[]> readProvisionState(String provisionId) {
return curator.getData(provisionStatePath(provisionId));
}
@SuppressWarnings("unused")
public void writeProvisionState(String provisionId, byte[] data) {
curator.set(provisionStatePath(provisionId), data);
}
@SuppressWarnings("unused")
public List<String> readProvisionStateIds() {
return curator.getChildren(provisionStatePath());
}
@SuppressWarnings("unused")
public Optional<byte[]> readVespaServerPool() {
return curator.getData(vespaServerPoolPath());
}
@SuppressWarnings("unused")
public void writeVespaServerPool(byte[] data) {
curator.set(vespaServerPoolPath(), data);
}
@SuppressWarnings("unused")
public Optional<byte[]> readOpenStackServerPool() {
return curator.getData(openStackServerPoolPath());
}
@SuppressWarnings("unused")
public void writeOpenStackServerPool(byte[] data) {
curator.set(openStackServerPoolPath(), data);
}
private Path lockPath(TenantName tenant) {
Path lockPath = lockRoot
.append(tenant.value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(ApplicationId application) {
Path lockPath = lockRoot
.append(application.tenant().value())
.append(application.application().value())
.append(application.instance().value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(String provisionId) {
Path lockPath = lockRoot
.append(provisionStatePath())
.append(provisionId);
curator.create(lockPath);
return lockPath;
}
private static Path inactiveJobsPath() {
return root.append("inactiveJobs");
}
private static Path upgradesPerMinutePath() {
return root.append("upgrader").append("upgradesPerMinute");
}
private static Path confidenceOverridesPath() {
return root.append("upgrader").append("confidenceOverrides");
}
private static Path versionStatusPath() {
return root.append("versionStatus");
}
private static Path provisionStatePath() {
return root.append("provisioning").append("states");
}
private static Path provisionStatePath(String provisionId) {
return provisionStatePath().append(provisionId);
}
private static Path vespaServerPoolPath() {
return root.append("vespaServerPool");
}
private static Path openStackServerPoolPath() {
return root.append("openStackServerPool");
}
private static Path tenantPath(TenantName name) {
return tenantRoot.append(name.value());
}
private static Path applicationPath(ApplicationId application) {
return applicationRoot.append(application.serializedForm());
}
private static Path controllerPath(String hostname) {
return controllerRoot.append(hostname);
}
} | class CuratorDb {
private static final Logger log = Logger.getLogger(CuratorDb.class.getName());
private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
private static final Path root = Path.fromString("/controller/v1");
private static final Path lockRoot = root.append("locks");
private static final Path tenantRoot = root.append("tenants");
private static final Path applicationRoot = root.append("applications");
private static final Path controllerRoot = root.append("controllers");
private final StringSetSerializer stringSetSerializer = new StringSetSerializer();
private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer();
private final VersionSerializer versionSerializer = new VersionSerializer();
private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
private final TenantSerializer tenantSerializer = new TenantSerializer();
private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
private final Curator curator;
/**
* All keys, to allow reentrancy.
* This will grow forever, but this should be too slow to be a problem.
*/
private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>();
@Inject
public CuratorDb(Curator curator) {
this.curator = curator;
}
/** Returns all hosts configured to be part of this ZooKeeper cluster */
public Lock lock(TenantName name, Duration timeout) {
return lock(lockPath(name), timeout);
}
public Lock lock(ApplicationId id, Duration timeout) {
return lock(lockPath(id), timeout);
}
public Lock lockRotations() {
return lock(lockRoot.append("rotations"), defaultLockTimeout);
}
/** Create a reentrant lock */
private Lock lock(Path path, Duration timeout) {
Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator));
lock.acquire(timeout);
return lock;
}
public Lock lockInactiveJobs() {
return lock(lockRoot.append("inactiveJobsLock"), defaultLockTimeout);
}
public Lock lockMaintenanceJob(String jobName) {
return lock(lockRoot.append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockProvisionState(String provisionStateId) {
return lock(lockPath(provisionStateId), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockVespaServerPool() {
return lock(lockRoot.append("vespaServerPoolLock"), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockOpenStackServerPool() {
return lock(lockRoot.append("openStackServerPoolLock"), Duration.ofSeconds(1));
}
private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
return curator.getData(path).filter(data -> data.length > 0).map(mapper);
}
private Optional<Slime> readSlime(Path path) {
return read(path, SlimeUtils::jsonToSlime);
}
private static byte[] asJson(Slime slime) {
try {
return SlimeUtils.toJsonBytes(slime);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public Set<String> readInactiveJobs() {
try {
return readSlime(inactiveJobsPath()).map(stringSetSerializer::fromSlime).orElseGet(HashSet::new);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state");
writeInactiveJobs(Collections.emptySet());
return new HashSet<>();
}
}
public void writeInactiveJobs(Set<String> inactiveJobs) {
curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs));
}
public double readUpgradesPerMinute() {
return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.5);
}
public void writeUpgradesPerMinute(double n) {
if (n < 0) {
throw new IllegalArgumentException("Upgrades per minute must be >= 0");
}
curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
}
public void writeVersionStatus(VersionStatus status) {
curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
}
public VersionStatus readVersionStatus() {
return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
}
public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
}
public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
.orElseGet(Collections::emptyMap);
}
public void writeControllerVersion(HostName hostname, Version version) {
curator.set(controllerPath(hostname.value()), asJson(versionSerializer.toSlime(version)));
}
public Version readControllerVersion(HostName hostname) {
return readSlime(controllerPath(hostname.value()))
.map(versionSerializer::fromSlime)
.orElse(Vtag.currentVersion);
}
public void writeTenant(UserTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<UserTenant> readUserTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::userTenantFrom);
}
public void writeTenant(AthenzTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<AthenzTenant> readAthenzTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::athenzTenantFrom);
}
public Optional<Tenant> readTenant(TenantName name) {
if (name.value().startsWith(Tenant.userPrefix)) {
return readUserTenant(name).map(Tenant.class::cast);
}
return readAthenzTenant(name).map(Tenant.class::cast);
}
public List<Tenant> readTenants() {
return curator.getChildren(tenantRoot).stream()
.map(TenantName::from)
.map(this::readTenant)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeTenant(TenantName name) {
curator.delete(tenantPath(name));
}
public void writeApplication(Application application) {
curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
}
public Optional<Application> readApplication(ApplicationId application) {
return readSlime(applicationPath(application)).map(applicationSerializer::fromSlime);
}
public List<Application> readApplications() {
return readApplications(ignored -> true);
}
public List<Application> readApplications(TenantName name) {
return readApplications(application -> application.tenant().equals(name));
}
private List<Application> readApplications(Predicate<ApplicationId> applicationFilter) {
return curator.getChildren(applicationRoot).stream()
.map(ApplicationId::fromSerializedForm)
.filter(applicationFilter)
.map(this::readApplication)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeApplication(ApplicationId application) {
curator.delete(applicationPath(application));
}
@SuppressWarnings("unused")
public Optional<byte[]> readProvisionState(String provisionId) {
return curator.getData(provisionStatePath(provisionId));
}
@SuppressWarnings("unused")
public void writeProvisionState(String provisionId, byte[] data) {
curator.set(provisionStatePath(provisionId), data);
}
@SuppressWarnings("unused")
public List<String> readProvisionStateIds() {
return curator.getChildren(provisionStatePath());
}
@SuppressWarnings("unused")
public Optional<byte[]> readVespaServerPool() {
return curator.getData(vespaServerPoolPath());
}
@SuppressWarnings("unused")
public void writeVespaServerPool(byte[] data) {
curator.set(vespaServerPoolPath(), data);
}
@SuppressWarnings("unused")
public Optional<byte[]> readOpenStackServerPool() {
return curator.getData(openStackServerPoolPath());
}
@SuppressWarnings("unused")
public void writeOpenStackServerPool(byte[] data) {
curator.set(openStackServerPoolPath(), data);
}
private Path lockPath(TenantName tenant) {
Path lockPath = lockRoot
.append(tenant.value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(ApplicationId application) {
Path lockPath = lockRoot
.append(application.tenant().value())
.append(application.application().value())
.append(application.instance().value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(String provisionId) {
Path lockPath = lockRoot
.append(provisionStatePath())
.append(provisionId);
curator.create(lockPath);
return lockPath;
}
private static Path inactiveJobsPath() {
return root.append("inactiveJobs");
}
private static Path upgradesPerMinutePath() {
return root.append("upgrader").append("upgradesPerMinute");
}
private static Path confidenceOverridesPath() {
return root.append("upgrader").append("confidenceOverrides");
}
private static Path versionStatusPath() {
return root.append("versionStatus");
}
private static Path provisionStatePath() {
return root.append("provisioning").append("states");
}
private static Path provisionStatePath(String provisionId) {
return provisionStatePath().append(provisionId);
}
private static Path vespaServerPoolPath() {
return root.append("vespaServerPool");
}
private static Path openStackServerPoolPath() {
return root.append("openStackServerPool");
}
private static Path tenantPath(TenantName name) {
return tenantRoot.append(name.value());
}
private static Path applicationPath(ApplicationId application) {
return applicationRoot.append(application.serializedForm());
}
private static Path controllerPath(String hostname) {
return controllerRoot.append(hostname);
}
} |
Fixed. | public List<HostName> cluster() {
return Arrays.stream(zooKeeperEnsembleConnectionSpec.split(","))
.filter(hostPort -> !hostPort.isEmpty())
.map(hostPort -> hostPort.contains(":")
? hostPort.substring(0, hostPort.indexOf(":"))
: hostPort)
.map(HostName::from)
.collect(Collectors.toList());
} | : hostPort) | public List<HostName> cluster() {
return Arrays.stream(curator.zooKeeperEnsembleConnectionSpec().split(","))
.filter(hostAndPort -> !hostAndPort.isEmpty())
.map(hostAndPort -> hostAndPort.split(":")[0])
.map(HostName::from)
.collect(Collectors.toList());
} | class CuratorDb {
private static final Logger log = Logger.getLogger(CuratorDb.class.getName());
private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
private static final Path root = Path.fromString("/controller/v1");
private static final Path lockRoot = root.append("locks");
private static final Path tenantRoot = root.append("tenants");
private static final Path applicationRoot = root.append("applications");
private static final Path controllerRoot = root.append("controllers");
private final StringSetSerializer stringSetSerializer = new StringSetSerializer();
private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer();
private final VersionSerializer versionSerializer = new VersionSerializer();
private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
private final TenantSerializer tenantSerializer = new TenantSerializer();
private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
private final Curator curator;
private final String zooKeeperEnsembleConnectionSpec;
/**
* All keys, to allow reentrancy.
* This will grow forever, but this should be too slow to be a problem.
*/
private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>();
@Inject
public CuratorDb(Curator curator) {
this(curator, curator.zooKeeperEnsembleConnectionSpec());
}
CuratorDb(Curator curator, String zooKeeperEnsembleConnectionSpec) {
this.curator = curator;
this.zooKeeperEnsembleConnectionSpec = zooKeeperEnsembleConnectionSpec;
}
/** Returns all hosts configured to be part of this ZooKeeper cluster */
public Lock lock(TenantName name, Duration timeout) {
return lock(lockPath(name), timeout);
}
public Lock lock(ApplicationId id, Duration timeout) {
return lock(lockPath(id), timeout);
}
public Lock lockRotations() {
return lock(lockRoot.append("rotations"), defaultLockTimeout);
}
/** Creates (or reuses) the reentrant lock registered for the given path, then acquires it within the timeout. */
private Lock lock(Path path, Duration timeout) {
    Lock reentrant = locks.computeIfAbsent(path, p -> new Lock(p.getAbsolute(), curator));
    reentrant.acquire(timeout);
    return reentrant;
}
public Lock lockInactiveJobs() {
return lock(lockRoot.append("inactiveJobsLock"), defaultLockTimeout);
}
public Lock lockMaintenanceJob(String jobName) {
return lock(lockRoot.append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockProvisionState(String provisionStateId) {
return lock(lockPath(provisionStateId), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockVespaServerPool() {
return lock(lockRoot.append("vespaServerPoolLock"), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockOpenStackServerPool() {
return lock(lockRoot.append("openStackServerPoolLock"), Duration.ofSeconds(1));
}
// Reads the bytes stored at the given path; empty payloads count as absent, present data is mapped.
private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
    Optional<byte[]> data = curator.getData(path);
    return data.filter(bytes -> bytes.length > 0).map(mapper);
}
private Optional<Slime> readSlime(Path path) {
return read(path, SlimeUtils::jsonToSlime);
}
/** Serializes the given slime structure to JSON bytes; I/O failures surface as unchecked exceptions. */
private static byte[] asJson(Slime slime) {
try {
return SlimeUtils.toJsonBytes(slime);
} catch (IOException e) {
// Serialization happens in memory, so an IOException here is unexpected — rethrow unchecked.
throw new UncheckedIOException(e);
}
}
/**
 * Returns the set of inactive job names, or an empty (mutable) set if no state is stored.
 * If the stored state cannot be deserialized, it is reset to empty so the system can recover.
 */
public Set<String> readInactiveJobs() {
    try {
        return readSlime(inactiveJobsPath()).map(stringSetSerializer::fromSlime).orElseGet(HashSet::new);
    }
    catch (RuntimeException e) {
        // Log the cause so the corrupt payload can be diagnosed; previously the exception was dropped.
        log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state", e);
        writeInactiveJobs(Collections.emptySet());
        return new HashSet<>();
    }
}
public void writeInactiveJobs(Set<String> inactiveJobs) {
curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs));
}
public double readUpgradesPerMinute() {
return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.5);
}
/** Persists the upgrades-per-minute rate. The rate must be non-negative. */
public void writeUpgradesPerMinute(double n) {
    if (n < 0) throw new IllegalArgumentException("Upgrades per minute must be >= 0");
    byte[] payload = ByteBuffer.allocate(Double.BYTES).putDouble(n).array();
    curator.set(upgradesPerMinutePath(), payload);
}
public void writeVersionStatus(VersionStatus status) {
curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
}
public VersionStatus readVersionStatus() {
return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
}
public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
}
public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
.orElseGet(Collections::emptyMap);
}
public void writeControllerVersion(HostName hostname, Version version) {
curator.set(controllerPath(hostname.value()), asJson(versionSerializer.toSlime(version)));
}
public Version readControllerVersion(HostName hostname) {
return readSlime(controllerPath(hostname.value()))
.map(versionSerializer::fromSlime)
.orElse(Vtag.currentVersion);
}
public void writeTenant(UserTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<UserTenant> readUserTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::userTenantFrom);
}
public void writeTenant(AthenzTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<AthenzTenant> readAthenzTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::athenzTenantFrom);
}
/** Reads the tenant with the given name, dispatching on the user-tenant name prefix. */
public Optional<Tenant> readTenant(TenantName name) {
    boolean isUserTenant = name.value().startsWith(Tenant.userPrefix);
    return isUserTenant ? readUserTenant(name).map(Tenant.class::cast)
                        : readAthenzTenant(name).map(Tenant.class::cast);
}
/**
 * Reads every tenant stored under the tenant root.
 * Tenants whose data cannot be read (e.g. removed concurrently) are skipped; the result is unmodifiable.
 */
public List<Tenant> readTenants() {
return curator.getChildren(tenantRoot).stream()
.map(TenantName::from)
.map(this::readTenant)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeTenant(TenantName name) {
curator.delete(tenantPath(name));
}
public void writeApplication(Application application) {
curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
}
public Optional<Application> readApplication(ApplicationId application) {
return readSlime(applicationPath(application)).map(applicationSerializer::fromSlime);
}
public List<Application> readApplications() {
return readApplications(ignored -> true);
}
public List<Application> readApplications(TenantName name) {
return readApplications(application -> application.tenant().equals(name));
}
/**
 * Reads all applications whose id matches the given filter.
 * Applications that disappear between listing and reading are skipped; the result is unmodifiable.
 */
private List<Application> readApplications(Predicate<ApplicationId> applicationFilter) {
return curator.getChildren(applicationRoot).stream()
.map(ApplicationId::fromSerializedForm)
.filter(applicationFilter)
.map(this::readApplication)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeApplication(ApplicationId application) {
curator.delete(applicationPath(application));
}
@SuppressWarnings("unused")
public Optional<byte[]> readProvisionState(String provisionId) {
return curator.getData(provisionStatePath(provisionId));
}
@SuppressWarnings("unused")
public void writeProvisionState(String provisionId, byte[] data) {
curator.set(provisionStatePath(provisionId), data);
}
@SuppressWarnings("unused")
public List<String> readProvisionStateIds() {
return curator.getChildren(provisionStatePath());
}
@SuppressWarnings("unused")
public Optional<byte[]> readVespaServerPool() {
return curator.getData(vespaServerPoolPath());
}
@SuppressWarnings("unused")
public void writeVespaServerPool(byte[] data) {
curator.set(vespaServerPoolPath(), data);
}
@SuppressWarnings("unused")
public Optional<byte[]> readOpenStackServerPool() {
return curator.getData(openStackServerPoolPath());
}
@SuppressWarnings("unused")
public void writeOpenStackServerPool(byte[] data) {
curator.set(openStackServerPoolPath(), data);
}
/** Returns the lock path for the given tenant, creating the node if it does not already exist. */
private Path lockPath(TenantName tenant) {
Path lockPath = lockRoot
.append(tenant.value());
curator.create(lockPath);
return lockPath;
}
/** Returns the lock path for the given application (tenant/application/instance), creating the node if needed. */
private Path lockPath(ApplicationId application) {
Path lockPath = lockRoot
.append(application.tenant().value())
.append(application.application().value())
.append(application.instance().value());
curator.create(lockPath);
return lockPath;
}
/**
 * Returns the lock path for the given provision state id, creating the node if needed.
 * NOTE(review): this appends provisionStatePath() — which is rooted at /controller/v1 — under lockRoot,
 * unlike the other lockPath overloads that append plain name segments. Presumably intentional, but
 * TODO confirm the resulting znode layout is what is expected.
 */
private Path lockPath(String provisionId) {
Path lockPath = lockRoot
.append(provisionStatePath())
.append(provisionId);
curator.create(lockPath);
return lockPath;
}
private static Path inactiveJobsPath() {
return root.append("inactiveJobs");
}
private static Path upgradesPerMinutePath() {
return root.append("upgrader").append("upgradesPerMinute");
}
private static Path confidenceOverridesPath() {
return root.append("upgrader").append("confidenceOverrides");
}
private static Path versionStatusPath() {
return root.append("versionStatus");
}
private static Path provisionStatePath() {
return root.append("provisioning").append("states");
}
private static Path provisionStatePath(String provisionId) {
return provisionStatePath().append(provisionId);
}
private static Path vespaServerPoolPath() {
return root.append("vespaServerPool");
}
private static Path openStackServerPoolPath() {
return root.append("openStackServerPool");
}
private static Path tenantPath(TenantName name) {
return tenantRoot.append(name.value());
}
private static Path applicationPath(ApplicationId application) {
return applicationRoot.append(application.serializedForm());
}
private static Path controllerPath(String hostname) {
return controllerRoot.append(hostname);
}
} | class CuratorDb {
private static final Logger log = Logger.getLogger(CuratorDb.class.getName());
private static final Duration defaultLockTimeout = Duration.ofMinutes(5);
private static final Path root = Path.fromString("/controller/v1");
private static final Path lockRoot = root.append("locks");
private static final Path tenantRoot = root.append("tenants");
private static final Path applicationRoot = root.append("applications");
private static final Path controllerRoot = root.append("controllers");
private final StringSetSerializer stringSetSerializer = new StringSetSerializer();
private final VersionStatusSerializer versionStatusSerializer = new VersionStatusSerializer();
private final VersionSerializer versionSerializer = new VersionSerializer();
private final ConfidenceOverrideSerializer confidenceOverrideSerializer = new ConfidenceOverrideSerializer();
private final TenantSerializer tenantSerializer = new TenantSerializer();
private final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
private final Curator curator;
/**
* All keys, to allow reentrancy.
* This will grow forever, but this should be too slow to be a problem.
*/
private final ConcurrentHashMap<Path, Lock> locks = new ConcurrentHashMap<>();
@Inject
public CuratorDb(Curator curator) {
this.curator = curator;
}
/** Returns all hosts configured to be part of this ZooKeeper cluster */
public Lock lock(TenantName name, Duration timeout) {
return lock(lockPath(name), timeout);
}
public Lock lock(ApplicationId id, Duration timeout) {
return lock(lockPath(id), timeout);
}
public Lock lockRotations() {
return lock(lockRoot.append("rotations"), defaultLockTimeout);
}
/** Create a reentrant lock */
private Lock lock(Path path, Duration timeout) {
Lock lock = locks.computeIfAbsent(path, (pathArg) -> new Lock(pathArg.getAbsolute(), curator));
lock.acquire(timeout);
return lock;
}
public Lock lockInactiveJobs() {
return lock(lockRoot.append("inactiveJobsLock"), defaultLockTimeout);
}
public Lock lockMaintenanceJob(String jobName) {
return lock(lockRoot.append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockProvisionState(String provisionStateId) {
return lock(lockPath(provisionStateId), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockVespaServerPool() {
return lock(lockRoot.append("vespaServerPoolLock"), Duration.ofSeconds(1));
}
@SuppressWarnings("unused")
public Lock lockOpenStackServerPool() {
return lock(lockRoot.append("openStackServerPoolLock"), Duration.ofSeconds(1));
}
private <T> Optional<T> read(Path path, Function<byte[], T> mapper) {
return curator.getData(path).filter(data -> data.length > 0).map(mapper);
}
private Optional<Slime> readSlime(Path path) {
return read(path, SlimeUtils::jsonToSlime);
}
private static byte[] asJson(Slime slime) {
try {
return SlimeUtils.toJsonBytes(slime);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
public Set<String> readInactiveJobs() {
try {
return readSlime(inactiveJobsPath()).map(stringSetSerializer::fromSlime).orElseGet(HashSet::new);
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Error reading inactive jobs, deleting inactive state");
writeInactiveJobs(Collections.emptySet());
return new HashSet<>();
}
}
public void writeInactiveJobs(Set<String> inactiveJobs) {
curator.set(inactiveJobsPath(), stringSetSerializer.toJson(inactiveJobs));
}
public double readUpgradesPerMinute() {
return read(upgradesPerMinutePath(), ByteBuffer::wrap).map(ByteBuffer::getDouble).orElse(0.5);
}
public void writeUpgradesPerMinute(double n) {
if (n < 0) {
throw new IllegalArgumentException("Upgrades per minute must be >= 0");
}
curator.set(upgradesPerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
}
public void writeVersionStatus(VersionStatus status) {
curator.set(versionStatusPath(), asJson(versionStatusSerializer.toSlime(status)));
}
public VersionStatus readVersionStatus() {
return readSlime(versionStatusPath()).map(versionStatusSerializer::fromSlime).orElseGet(VersionStatus::empty);
}
public void writeConfidenceOverrides(Map<Version, VespaVersion.Confidence> overrides) {
curator.set(confidenceOverridesPath(), asJson(confidenceOverrideSerializer.toSlime(overrides)));
}
public Map<Version, VespaVersion.Confidence> readConfidenceOverrides() {
return readSlime(confidenceOverridesPath()).map(confidenceOverrideSerializer::fromSlime)
.orElseGet(Collections::emptyMap);
}
public void writeControllerVersion(HostName hostname, Version version) {
curator.set(controllerPath(hostname.value()), asJson(versionSerializer.toSlime(version)));
}
public Version readControllerVersion(HostName hostname) {
return readSlime(controllerPath(hostname.value()))
.map(versionSerializer::fromSlime)
.orElse(Vtag.currentVersion);
}
public void writeTenant(UserTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<UserTenant> readUserTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::userTenantFrom);
}
public void writeTenant(AthenzTenant tenant) {
curator.set(tenantPath(tenant.name()), asJson(tenantSerializer.toSlime(tenant)));
}
public Optional<AthenzTenant> readAthenzTenant(TenantName name) {
return readSlime(tenantPath(name)).map(tenantSerializer::athenzTenantFrom);
}
public Optional<Tenant> readTenant(TenantName name) {
if (name.value().startsWith(Tenant.userPrefix)) {
return readUserTenant(name).map(Tenant.class::cast);
}
return readAthenzTenant(name).map(Tenant.class::cast);
}
public List<Tenant> readTenants() {
return curator.getChildren(tenantRoot).stream()
.map(TenantName::from)
.map(this::readTenant)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeTenant(TenantName name) {
curator.delete(tenantPath(name));
}
public void writeApplication(Application application) {
curator.set(applicationPath(application.id()), asJson(applicationSerializer.toSlime(application)));
}
public Optional<Application> readApplication(ApplicationId application) {
return readSlime(applicationPath(application)).map(applicationSerializer::fromSlime);
}
public List<Application> readApplications() {
return readApplications(ignored -> true);
}
public List<Application> readApplications(TenantName name) {
return readApplications(application -> application.tenant().equals(name));
}
private List<Application> readApplications(Predicate<ApplicationId> applicationFilter) {
return curator.getChildren(applicationRoot).stream()
.map(ApplicationId::fromSerializedForm)
.filter(applicationFilter)
.map(this::readApplication)
.filter(Optional::isPresent)
.map(Optional::get)
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
public void removeApplication(ApplicationId application) {
curator.delete(applicationPath(application));
}
@SuppressWarnings("unused")
public Optional<byte[]> readProvisionState(String provisionId) {
return curator.getData(provisionStatePath(provisionId));
}
@SuppressWarnings("unused")
public void writeProvisionState(String provisionId, byte[] data) {
curator.set(provisionStatePath(provisionId), data);
}
@SuppressWarnings("unused")
public List<String> readProvisionStateIds() {
return curator.getChildren(provisionStatePath());
}
@SuppressWarnings("unused")
public Optional<byte[]> readVespaServerPool() {
return curator.getData(vespaServerPoolPath());
}
@SuppressWarnings("unused")
public void writeVespaServerPool(byte[] data) {
curator.set(vespaServerPoolPath(), data);
}
@SuppressWarnings("unused")
public Optional<byte[]> readOpenStackServerPool() {
return curator.getData(openStackServerPoolPath());
}
@SuppressWarnings("unused")
public void writeOpenStackServerPool(byte[] data) {
curator.set(openStackServerPoolPath(), data);
}
private Path lockPath(TenantName tenant) {
Path lockPath = lockRoot
.append(tenant.value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(ApplicationId application) {
Path lockPath = lockRoot
.append(application.tenant().value())
.append(application.application().value())
.append(application.instance().value());
curator.create(lockPath);
return lockPath;
}
private Path lockPath(String provisionId) {
Path lockPath = lockRoot
.append(provisionStatePath())
.append(provisionId);
curator.create(lockPath);
return lockPath;
}
private static Path inactiveJobsPath() {
return root.append("inactiveJobs");
}
private static Path upgradesPerMinutePath() {
return root.append("upgrader").append("upgradesPerMinute");
}
private static Path confidenceOverridesPath() {
return root.append("upgrader").append("confidenceOverrides");
}
private static Path versionStatusPath() {
return root.append("versionStatus");
}
private static Path provisionStatePath() {
return root.append("provisioning").append("states");
}
private static Path provisionStatePath(String provisionId) {
return provisionStatePath().append(provisionId);
}
private static Path vespaServerPoolPath() {
return root.append("vespaServerPool");
}
private static Path openStackServerPoolPath() {
return root.append("openStackServerPool");
}
private static Path tenantPath(TenantName name) {
return tenantRoot.append(name.value());
}
private static Path applicationPath(ApplicationId application) {
return applicationRoot.append(application.serializedForm());
}
private static Path controllerPath(String hostname) {
return controllerRoot.append(hostname);
}
} |
I think this would be easier to read if it was all put in the `maintain()`, as ```java VespaVersion controllerVersion = controller().versionStatus().controllerVersion(); if ( ! controllerVersion.isCurrentSystemVersion()) { // upgrade with controllerVersion.versionNumber() } ``` | private Optional<Version> targetVersion() {
return controller().versionStatus().controllerVersion()
.map(VespaVersion::versionNumber)
.filter(controllerVersion -> controller().versionStatus().systemVersion()
.map(VespaVersion::versionNumber)
.filter(systemVersion -> systemVersion.isBefore(controllerVersion))
.isPresent());
} | .isPresent()); | private Optional<Version> targetVersion() {
return controller().versionStatus().controllerVersion()
.filter(vespaVersion -> !vespaVersion.isSystemVersion())
.map(VespaVersion::versionNumber);
} | class SystemUpgrader extends Maintainer {
private static final Logger log = Logger.getLogger(SystemUpgrader.class.getName());
public SystemUpgrader(Controller controller, Duration interval, JobControl jobControl) {
super(controller, interval, jobControl);
}
/**
 * Upgrades config servers to the target version, zone group by zone group, following the
 * zone registry's upgrade policy. Does nothing when no target version is available.
 */
@Override
protected void maintain() {
Optional<Version> target = targetVersion();
if (!target.isPresent()) {
return;
}
for (List<ZoneId> zones : controller().zoneRegistry().upgradePolicy().asList()) {
// Later zone groups are only touched once all earlier groups have completed their upgrade.
if (!completeUpgrade(zones, target.get())) {
break;
}
}
}
/** Starts upgrade of every given zone (where needed) and reports whether all of them have finished. */
private boolean completeUpgrade(List<ZoneId> zones, Version version) {
    boolean allDone = true;
    for (ZoneId zone : zones) {
        startUpgrade(zone, version);
        // Short-circuit: once one zone is known incomplete, skip further isUpgrading polls,
        // exactly as the original `completed && !isUpgrading(zone)` did.
        if (allDone && isUpgrading(zone)) {
            allDone = false;
        }
    }
    return allDone;
}
/** Returns true if any config server in the given zone is currently upgrading */
private boolean isUpgrading(ZoneId zone) {
return configServerUris(zone).stream().anyMatch(uri -> controller().configServer().version(uri).upgrading());
}
/** Schedules upgrade of every config server in the given zone that does not already want the given version. */
private void startUpgrade(ZoneId zone, Version version) {
    // Both side effects (logging and triggering the upgrade) belong in the terminal operation:
    // Stream.peek is documented for debugging and its action may be elided by the implementation.
    configServerUris(zone).stream()
                          .filter(uri -> !controller().configServer().version(uri).wanted().equals(version))
                          .forEach(uri -> {
                              log.info(String.format("Upgrading config server %s in %s", uri.getHost(), zone));
                              controller().configServer().upgrade(uri, version);
                          });
}
/** Returns the URIs of the config servers in the given zone. (Previous javadoc described targetVersion(), not this method.) */
private List<URI> configServerUris(ZoneId zone) {
return controller().zoneRegistry().getConfigServerUris(zone);
}
} | class SystemUpgrader extends Maintainer {
private static final Logger log = Logger.getLogger(SystemUpgrader.class.getName());
public SystemUpgrader(Controller controller, Duration interval, JobControl jobControl) {
super(controller, interval, jobControl);
}
@Override
protected void maintain() {
Optional<Version> target = targetVersion();
if (!target.isPresent()) {
return;
}
for (List<ZoneId> zones : controller().zoneRegistry().upgradePolicy().asList()) {
if (!completeUpgrade(zones, target.get())) {
break;
}
}
}
/** Returns true if upgrade of given zones is complete */
private boolean completeUpgrade(List<ZoneId> zones, Version version) {
boolean completed = true;
for (ZoneId zone : zones) {
startUpgrade(zone, version);
completed = completed && !isUpgrading(zone);
}
return completed;
}
/** Returns true if any config servers in given zone are upgrading */
private boolean isUpgrading(ZoneId zone) {
return configServerUris(zone).stream().anyMatch(uri -> controller().configServer().version(uri).upgrading());
}
/** Schedule upgrade of config servers in given zone, if necessary */
private void startUpgrade(ZoneId zone, Version version) {
configServerUris(zone).stream()
.filter(uri -> !controller().configServer().version(uri).wanted().equals(version))
.peek(uri -> log.info(String.format("Upgrading config server %s in %s", uri.getHost(),
zone)))
.forEach(uri -> controller().configServer().upgrade(uri, version));
}
/** Returns target version for the system */
private List<URI> configServerUris(ZoneId zone) {
return controller().zoneRegistry().getConfigServerUris(zone);
}
} |
Consider ``` Optional.ofNullable(attributes.getDockerImage()).ifPresent(this::currentDockerImage); Optional.ofNullable(attributes.getHardwareDivergence()).ifPresent(this::hardwareDivergence); Optional.ofNullable(attributes.getRebootGeneration()).ifPresent(this::currentRebootGeneration); Optional.ofNullable(attributes.getRestartGeneration()).ifPresent(this::currentRestartGeneration); ``` | public Builder updateFromNodeAttributes(NodeAttributes attributes) {
if (attributes.getDockerImage() != null) {
currentDockerImage = Optional.of(attributes.getDockerImage());
}
if (attributes.getHardwareDivergence() != null) {
hardwareDivergence = Optional.of(attributes.getHardwareDivergence());
}
if (attributes.getRebootGeneration() != null) {
currentRebootGeneration = attributes.getRebootGeneration();
}
if (attributes.getRestartGeneration() != null) {
currentRestartGeneration = Optional.of(attributes.getRestartGeneration());
}
return this;
} | if (attributes.getDockerImage() != null) { | public Builder updateFromNodeAttributes(NodeAttributes attributes) {
if (attributes.getDockerImage() != null) {
currentDockerImage = Optional.of(attributes.getDockerImage());
}
if (attributes.getHardwareDivergence() != null) {
hardwareDivergence = Optional.of(attributes.getHardwareDivergence());
}
if (attributes.getRebootGeneration() != null) {
currentRebootGeneration = attributes.getRebootGeneration();
}
if (attributes.getRestartGeneration() != null) {
currentRestartGeneration = Optional.of(attributes.getRestartGeneration());
}
return this;
} | class Builder {
private String hostname;
private Optional<DockerImage> wantedDockerImage = Optional.empty();
private Optional<DockerImage> currentDockerImage = Optional.empty();
private Node.State nodeState;
private NodeType nodeType;
private String nodeFlavor;
private String nodeCanonicalFlavor;
private Optional<String> wantedVespaVersion = Optional.empty();
private Optional<String> vespaVersion = Optional.empty();
private Optional<Boolean> allowedToBeDown = Optional.empty();
private Optional<Owner> owner = Optional.empty();
private Optional<Membership> membership = Optional.empty();
private Optional<Long> wantedRestartGeneration = Optional.empty();
private Optional<Long> currentRestartGeneration = Optional.empty();
private long wantedRebootGeneration;
private long currentRebootGeneration;
private double minCpuCores;
private double minMainMemoryAvailableGb;
private double minDiskAvailableGb;
private boolean fastDisk = false;
private Set<String> ipAddresses = Collections.emptySet();
private Optional<String> hardwareDivergence = Optional.empty();
private Optional<String> parentHostname = Optional.empty();
public Builder() {}
/** Copy constructor: initializes this builder from every field of the given node spec. */
public Builder(NodeSpec node) {
hostname(node.hostname);
nodeState(node.nodeState);
nodeType(node.nodeType);
nodeFlavor(node.nodeFlavor);
nodeCanonicalFlavor(node.nodeCanonicalFlavor);
minCpuCores(node.minCpuCores);
minMainMemoryAvailableGb(node.minMainMemoryAvailableGb);
minDiskAvailableGb(node.minDiskAvailableGb);
fastDisk(node.fastDisk);
ipAddresses(node.ipAddresses);
wantedRebootGeneration(node.wantedRebootGeneration);
currentRebootGeneration(node.currentRebootGeneration);
// Optional-valued fields are copied only when present, so the builder's empty defaults survive.
node.wantedDockerImage.ifPresent(this::wantedDockerImage);
node.currentDockerImage.ifPresent(this::currentDockerImage);
node.wantedVespaVersion.ifPresent(this::wantedVespaVersion);
node.vespaVersion.ifPresent(this::vespaVersion);
node.allowedToBeDown.ifPresent(this::allowedToBeDown);
node.owner.ifPresent(this::owner);
node.membership.ifPresent(this::membership);
node.wantedRestartGeneration.ifPresent(this::wantedRestartGeneration);
node.currentRestartGeneration.ifPresent(this::currentRestartGeneration);
node.hardwareDivergence.ifPresent(this::hardwareDivergence);
node.parentHostname.ifPresent(this::parentHostname);
}
public Builder hostname(String hostname) {
this.hostname = hostname;
return this;
}
public Builder wantedDockerImage(DockerImage wantedDockerImage) {
this.wantedDockerImage = Optional.of(wantedDockerImage);
return this;
}
public Builder currentDockerImage(DockerImage currentDockerImage) {
this.currentDockerImage = Optional.of(currentDockerImage);
return this;
}
public Builder nodeState(Node.State nodeState) {
this.nodeState = nodeState;
return this;
}
public Builder nodeType(NodeType nodeType) {
this.nodeType = nodeType;
return this;
}
public Builder nodeFlavor(String nodeFlavor) {
this.nodeFlavor = nodeFlavor;
return this;
}
public Builder nodeCanonicalFlavor(String nodeCanonicalFlavor) {
this.nodeCanonicalFlavor = nodeCanonicalFlavor;
return this;
}
public Builder wantedVespaVersion(String wantedVespaVersion) {
this.wantedVespaVersion = Optional.of(wantedVespaVersion);
return this;
}
public Builder vespaVersion(String vespaVersion) {
this.vespaVersion = Optional.of(vespaVersion);
return this;
}
public Builder allowedToBeDown(boolean allowedToBeDown) {
this.allowedToBeDown = Optional.of(allowedToBeDown);
return this;
}
public Builder owner(Owner owner) {
this.owner = Optional.of(owner);
return this;
}
public Builder membership(Membership membership) {
this.membership = Optional.of(membership);
return this;
}
public Builder wantedRestartGeneration(long wantedRestartGeneration) {
this.wantedRestartGeneration = Optional.of(wantedRestartGeneration);
return this;
}
public Builder currentRestartGeneration(long currentRestartGeneration) {
this.currentRestartGeneration = Optional.of(currentRestartGeneration);
return this;
}
public Builder wantedRebootGeneration(long wantedRebootGeneration) {
this.wantedRebootGeneration = wantedRebootGeneration;
return this;
}
public Builder currentRebootGeneration(long currentRebootGeneration) {
this.currentRebootGeneration = currentRebootGeneration;
return this;
}
public Builder minCpuCores(double minCpuCores) {
this.minCpuCores = minCpuCores;
return this;
}
public Builder minMainMemoryAvailableGb(double minMainMemoryAvailableGb) {
this.minMainMemoryAvailableGb = minMainMemoryAvailableGb;
return this;
}
public Builder minDiskAvailableGb(double minDiskAvailableGb) {
this.minDiskAvailableGb = minDiskAvailableGb;
return this;
}
public Builder fastDisk(boolean fastDisk) {
this.fastDisk = fastDisk;
return this;
}
public Builder ipAddresses(Set<String> ipAddresses) {
this.ipAddresses = ipAddresses;
return this;
}
public Builder hardwareDivergence(String hardwareDivergence) {
this.hardwareDivergence = Optional.of(hardwareDivergence);
return this;
}
public Builder parentHostname(String parentHostname) {
this.parentHostname = Optional.of(parentHostname);
return this;
}
public String getHostname() {
return hostname;
}
public Optional<DockerImage> getWantedDockerImage() {
return wantedDockerImage;
}
public Optional<DockerImage> getCurrentDockerImage() {
return currentDockerImage;
}
public Node.State getNodeState() {
return nodeState;
}
public NodeType getNodeType() {
return nodeType;
}
public String getNodeFlavor() {
return nodeFlavor;
}
public String getNodeCanonicalFlavor() {
return nodeCanonicalFlavor;
}
public Optional<String> getWantedVespaVersion() {
return wantedVespaVersion;
}
public Optional<String> getVespaVersion() {
return vespaVersion;
}
public Optional<Boolean> getAllowedToBeDown() {
return allowedToBeDown;
}
public Optional<Owner> getOwner() {
return owner;
}
public Optional<Membership> getMembership() {
return membership;
}
public Optional<Long> getWantedRestartGeneration() {
return wantedRestartGeneration;
}
public Optional<Long> getCurrentRestartGeneration() {
return currentRestartGeneration;
}
public long getWantedRebootGeneration() {
return wantedRebootGeneration;
}
public long getCurrentRebootGeneration() {
return currentRebootGeneration;
}
public double getMinCpuCores() {
return minCpuCores;
}
public double getMinMainMemoryAvailableGb() {
return minMainMemoryAvailableGb;
}
public double getMinDiskAvailableGb() {
return minDiskAvailableGb;
}
public boolean isFastDisk() {
return fastDisk;
}
public Set<String> getIpAddresses() {
return ipAddresses;
}
public Optional<String> getHardwareDivergence() {
return hardwareDivergence;
}
public Optional<String> getParentHostname() {
return parentHostname;
}
public NodeSpec build() {
return new NodeSpec(hostname, wantedDockerImage, currentDockerImage, nodeState, nodeType,
nodeFlavor, nodeCanonicalFlavor,
wantedVespaVersion, vespaVersion, allowedToBeDown, owner, membership,
wantedRestartGeneration, currentRestartGeneration,
wantedRebootGeneration, currentRebootGeneration,
minCpuCores, minMainMemoryAvailableGb, minDiskAvailableGb,
fastDisk, ipAddresses, hardwareDivergence, parentHostname);
}
} | class Builder {
private String hostname;
private Optional<DockerImage> wantedDockerImage = Optional.empty();
private Optional<DockerImage> currentDockerImage = Optional.empty();
private Node.State nodeState;
private NodeType nodeType;
private String nodeFlavor;
private String nodeCanonicalFlavor;
private Optional<String> wantedVespaVersion = Optional.empty();
private Optional<String> vespaVersion = Optional.empty();
private Optional<Boolean> allowedToBeDown = Optional.empty();
private Optional<Owner> owner = Optional.empty();
private Optional<Membership> membership = Optional.empty();
private Optional<Long> wantedRestartGeneration = Optional.empty();
private Optional<Long> currentRestartGeneration = Optional.empty();
private long wantedRebootGeneration;
private long currentRebootGeneration;
private double minCpuCores;
private double minMainMemoryAvailableGb;
private double minDiskAvailableGb;
private boolean fastDisk = false;
private Set<String> ipAddresses = Collections.emptySet();
private Optional<String> hardwareDivergence = Optional.empty();
private Optional<String> parentHostname = Optional.empty();
public Builder() {}
public Builder(NodeSpec node) {
hostname(node.hostname);
nodeState(node.nodeState);
nodeType(node.nodeType);
nodeFlavor(node.nodeFlavor);
nodeCanonicalFlavor(node.nodeCanonicalFlavor);
minCpuCores(node.minCpuCores);
minMainMemoryAvailableGb(node.minMainMemoryAvailableGb);
minDiskAvailableGb(node.minDiskAvailableGb);
fastDisk(node.fastDisk);
ipAddresses(node.ipAddresses);
wantedRebootGeneration(node.wantedRebootGeneration);
currentRebootGeneration(node.currentRebootGeneration);
node.wantedDockerImage.ifPresent(this::wantedDockerImage);
node.currentDockerImage.ifPresent(this::currentDockerImage);
node.wantedVespaVersion.ifPresent(this::wantedVespaVersion);
node.vespaVersion.ifPresent(this::vespaVersion);
node.allowedToBeDown.ifPresent(this::allowedToBeDown);
node.owner.ifPresent(this::owner);
node.membership.ifPresent(this::membership);
node.wantedRestartGeneration.ifPresent(this::wantedRestartGeneration);
node.currentRestartGeneration.ifPresent(this::currentRestartGeneration);
node.hardwareDivergence.ifPresent(this::hardwareDivergence);
node.parentHostname.ifPresent(this::parentHostname);
}
public Builder hostname(String hostname) {
this.hostname = hostname;
return this;
}
public Builder wantedDockerImage(DockerImage wantedDockerImage) {
this.wantedDockerImage = Optional.of(wantedDockerImage);
return this;
}
public Builder currentDockerImage(DockerImage currentDockerImage) {
this.currentDockerImage = Optional.of(currentDockerImage);
return this;
}
public Builder nodeState(Node.State nodeState) {
this.nodeState = nodeState;
return this;
}
public Builder nodeType(NodeType nodeType) {
this.nodeType = nodeType;
return this;
}
public Builder nodeFlavor(String nodeFlavor) {
this.nodeFlavor = nodeFlavor;
return this;
}
public Builder nodeCanonicalFlavor(String nodeCanonicalFlavor) {
this.nodeCanonicalFlavor = nodeCanonicalFlavor;
return this;
}
public Builder wantedVespaVersion(String wantedVespaVersion) {
this.wantedVespaVersion = Optional.of(wantedVespaVersion);
return this;
}
public Builder vespaVersion(String vespaVersion) {
this.vespaVersion = Optional.of(vespaVersion);
return this;
}
public Builder allowedToBeDown(boolean allowedToBeDown) {
this.allowedToBeDown = Optional.of(allowedToBeDown);
return this;
}
public Builder owner(Owner owner) {
this.owner = Optional.of(owner);
return this;
}
public Builder membership(Membership membership) {
this.membership = Optional.of(membership);
return this;
}
public Builder wantedRestartGeneration(long wantedRestartGeneration) {
this.wantedRestartGeneration = Optional.of(wantedRestartGeneration);
return this;
}
public Builder currentRestartGeneration(long currentRestartGeneration) {
this.currentRestartGeneration = Optional.of(currentRestartGeneration);
return this;
}
public Builder wantedRebootGeneration(long wantedRebootGeneration) {
this.wantedRebootGeneration = wantedRebootGeneration;
return this;
}
public Builder currentRebootGeneration(long currentRebootGeneration) {
this.currentRebootGeneration = currentRebootGeneration;
return this;
}
public Builder minCpuCores(double minCpuCores) {
this.minCpuCores = minCpuCores;
return this;
}
public Builder minMainMemoryAvailableGb(double minMainMemoryAvailableGb) {
this.minMainMemoryAvailableGb = minMainMemoryAvailableGb;
return this;
}
public Builder minDiskAvailableGb(double minDiskAvailableGb) {
this.minDiskAvailableGb = minDiskAvailableGb;
return this;
}
public Builder fastDisk(boolean fastDisk) {
this.fastDisk = fastDisk;
return this;
}
public Builder ipAddresses(Set<String> ipAddresses) {
this.ipAddresses = ipAddresses;
return this;
}
public Builder hardwareDivergence(String hardwareDivergence) {
this.hardwareDivergence = Optional.of(hardwareDivergence);
return this;
}
public Builder parentHostname(String parentHostname) {
this.parentHostname = Optional.of(parentHostname);
return this;
}
public String getHostname() {
return hostname;
}
public Optional<DockerImage> getWantedDockerImage() {
return wantedDockerImage;
}
public Optional<DockerImage> getCurrentDockerImage() {
return currentDockerImage;
}
public Node.State getNodeState() {
return nodeState;
}
public NodeType getNodeType() {
return nodeType;
}
public String getNodeFlavor() {
return nodeFlavor;
}
public String getNodeCanonicalFlavor() {
return nodeCanonicalFlavor;
}
public Optional<String> getWantedVespaVersion() {
return wantedVespaVersion;
}
public Optional<String> getVespaVersion() {
return vespaVersion;
}
public Optional<Boolean> getAllowedToBeDown() {
return allowedToBeDown;
}
public Optional<Owner> getOwner() {
return owner;
}
public Optional<Membership> getMembership() {
return membership;
}
public Optional<Long> getWantedRestartGeneration() {
return wantedRestartGeneration;
}
public Optional<Long> getCurrentRestartGeneration() {
return currentRestartGeneration;
}
public long getWantedRebootGeneration() {
return wantedRebootGeneration;
}
public long getCurrentRebootGeneration() {
return currentRebootGeneration;
}
public double getMinCpuCores() {
return minCpuCores;
}
public double getMinMainMemoryAvailableGb() {
return minMainMemoryAvailableGb;
}
public double getMinDiskAvailableGb() {
return minDiskAvailableGb;
}
public boolean isFastDisk() {
return fastDisk;
}
public Set<String> getIpAddresses() {
return ipAddresses;
}
public Optional<String> getHardwareDivergence() {
return hardwareDivergence;
}
public Optional<String> getParentHostname() {
return parentHostname;
}
public NodeSpec build() {
return new NodeSpec(hostname, wantedDockerImage, currentDockerImage, nodeState, nodeType,
nodeFlavor, nodeCanonicalFlavor,
wantedVespaVersion, vespaVersion, allowedToBeDown, owner, membership,
wantedRestartGeneration, currentRestartGeneration,
wantedRebootGeneration, currentRebootGeneration,
minCpuCores, minMainMemoryAvailableGb, minDiskAvailableGb,
fastDisk, ipAddresses, hardwareDivergence, parentHostname);
}
} |
Already considered | public Builder updateFromNodeAttributes(NodeAttributes attributes) {
if (attributes.getDockerImage() != null) {
currentDockerImage = Optional.of(attributes.getDockerImage());
}
if (attributes.getHardwareDivergence() != null) {
hardwareDivergence = Optional.of(attributes.getHardwareDivergence());
}
if (attributes.getRebootGeneration() != null) {
currentRebootGeneration = attributes.getRebootGeneration();
}
if (attributes.getRestartGeneration() != null) {
currentRestartGeneration = Optional.of(attributes.getRestartGeneration());
}
return this;
} | if (attributes.getDockerImage() != null) { | public Builder updateFromNodeAttributes(NodeAttributes attributes) {
if (attributes.getDockerImage() != null) {
currentDockerImage = Optional.of(attributes.getDockerImage());
}
if (attributes.getHardwareDivergence() != null) {
hardwareDivergence = Optional.of(attributes.getHardwareDivergence());
}
if (attributes.getRebootGeneration() != null) {
currentRebootGeneration = attributes.getRebootGeneration();
}
if (attributes.getRestartGeneration() != null) {
currentRestartGeneration = Optional.of(attributes.getRestartGeneration());
}
return this;
} | class Builder {
private String hostname;
private Optional<DockerImage> wantedDockerImage = Optional.empty();
private Optional<DockerImage> currentDockerImage = Optional.empty();
private Node.State nodeState;
private NodeType nodeType;
private String nodeFlavor;
private String nodeCanonicalFlavor;
private Optional<String> wantedVespaVersion = Optional.empty();
private Optional<String> vespaVersion = Optional.empty();
private Optional<Boolean> allowedToBeDown = Optional.empty();
private Optional<Owner> owner = Optional.empty();
private Optional<Membership> membership = Optional.empty();
private Optional<Long> wantedRestartGeneration = Optional.empty();
private Optional<Long> currentRestartGeneration = Optional.empty();
private long wantedRebootGeneration;
private long currentRebootGeneration;
private double minCpuCores;
private double minMainMemoryAvailableGb;
private double minDiskAvailableGb;
private boolean fastDisk = false;
private Set<String> ipAddresses = Collections.emptySet();
private Optional<String> hardwareDivergence = Optional.empty();
private Optional<String> parentHostname = Optional.empty();
public Builder() {}
public Builder(NodeSpec node) {
hostname(node.hostname);
nodeState(node.nodeState);
nodeType(node.nodeType);
nodeFlavor(node.nodeFlavor);
nodeCanonicalFlavor(node.nodeCanonicalFlavor);
minCpuCores(node.minCpuCores);
minMainMemoryAvailableGb(node.minMainMemoryAvailableGb);
minDiskAvailableGb(node.minDiskAvailableGb);
fastDisk(node.fastDisk);
ipAddresses(node.ipAddresses);
wantedRebootGeneration(node.wantedRebootGeneration);
currentRebootGeneration(node.currentRebootGeneration);
node.wantedDockerImage.ifPresent(this::wantedDockerImage);
node.currentDockerImage.ifPresent(this::currentDockerImage);
node.wantedVespaVersion.ifPresent(this::wantedVespaVersion);
node.vespaVersion.ifPresent(this::vespaVersion);
node.allowedToBeDown.ifPresent(this::allowedToBeDown);
node.owner.ifPresent(this::owner);
node.membership.ifPresent(this::membership);
node.wantedRestartGeneration.ifPresent(this::wantedRestartGeneration);
node.currentRestartGeneration.ifPresent(this::currentRestartGeneration);
node.hardwareDivergence.ifPresent(this::hardwareDivergence);
node.parentHostname.ifPresent(this::parentHostname);
}
public Builder hostname(String hostname) {
this.hostname = hostname;
return this;
}
public Builder wantedDockerImage(DockerImage wantedDockerImage) {
this.wantedDockerImage = Optional.of(wantedDockerImage);
return this;
}
public Builder currentDockerImage(DockerImage currentDockerImage) {
this.currentDockerImage = Optional.of(currentDockerImage);
return this;
}
public Builder nodeState(Node.State nodeState) {
this.nodeState = nodeState;
return this;
}
public Builder nodeType(NodeType nodeType) {
this.nodeType = nodeType;
return this;
}
public Builder nodeFlavor(String nodeFlavor) {
this.nodeFlavor = nodeFlavor;
return this;
}
public Builder nodeCanonicalFlavor(String nodeCanonicalFlavor) {
this.nodeCanonicalFlavor = nodeCanonicalFlavor;
return this;
}
public Builder wantedVespaVersion(String wantedVespaVersion) {
this.wantedVespaVersion = Optional.of(wantedVespaVersion);
return this;
}
public Builder vespaVersion(String vespaVersion) {
this.vespaVersion = Optional.of(vespaVersion);
return this;
}
public Builder allowedToBeDown(boolean allowedToBeDown) {
this.allowedToBeDown = Optional.of(allowedToBeDown);
return this;
}
public Builder owner(Owner owner) {
this.owner = Optional.of(owner);
return this;
}
public Builder membership(Membership membership) {
this.membership = Optional.of(membership);
return this;
}
public Builder wantedRestartGeneration(long wantedRestartGeneration) {
this.wantedRestartGeneration = Optional.of(wantedRestartGeneration);
return this;
}
public Builder currentRestartGeneration(long currentRestartGeneration) {
this.currentRestartGeneration = Optional.of(currentRestartGeneration);
return this;
}
public Builder wantedRebootGeneration(long wantedRebootGeneration) {
this.wantedRebootGeneration = wantedRebootGeneration;
return this;
}
public Builder currentRebootGeneration(long currentRebootGeneration) {
this.currentRebootGeneration = currentRebootGeneration;
return this;
}
public Builder minCpuCores(double minCpuCores) {
this.minCpuCores = minCpuCores;
return this;
}
public Builder minMainMemoryAvailableGb(double minMainMemoryAvailableGb) {
this.minMainMemoryAvailableGb = minMainMemoryAvailableGb;
return this;
}
public Builder minDiskAvailableGb(double minDiskAvailableGb) {
this.minDiskAvailableGb = minDiskAvailableGb;
return this;
}
public Builder fastDisk(boolean fastDisk) {
this.fastDisk = fastDisk;
return this;
}
public Builder ipAddresses(Set<String> ipAddresses) {
this.ipAddresses = ipAddresses;
return this;
}
public Builder hardwareDivergence(String hardwareDivergence) {
this.hardwareDivergence = Optional.of(hardwareDivergence);
return this;
}
public Builder parentHostname(String parentHostname) {
this.parentHostname = Optional.of(parentHostname);
return this;
}
public String getHostname() {
return hostname;
}
public Optional<DockerImage> getWantedDockerImage() {
return wantedDockerImage;
}
public Optional<DockerImage> getCurrentDockerImage() {
return currentDockerImage;
}
public Node.State getNodeState() {
return nodeState;
}
public NodeType getNodeType() {
return nodeType;
}
public String getNodeFlavor() {
return nodeFlavor;
}
public String getNodeCanonicalFlavor() {
return nodeCanonicalFlavor;
}
public Optional<String> getWantedVespaVersion() {
return wantedVespaVersion;
}
public Optional<String> getVespaVersion() {
return vespaVersion;
}
public Optional<Boolean> getAllowedToBeDown() {
return allowedToBeDown;
}
public Optional<Owner> getOwner() {
return owner;
}
public Optional<Membership> getMembership() {
return membership;
}
public Optional<Long> getWantedRestartGeneration() {
return wantedRestartGeneration;
}
public Optional<Long> getCurrentRestartGeneration() {
return currentRestartGeneration;
}
public long getWantedRebootGeneration() {
return wantedRebootGeneration;
}
public long getCurrentRebootGeneration() {
return currentRebootGeneration;
}
public double getMinCpuCores() {
return minCpuCores;
}
public double getMinMainMemoryAvailableGb() {
return minMainMemoryAvailableGb;
}
public double getMinDiskAvailableGb() {
return minDiskAvailableGb;
}
public boolean isFastDisk() {
return fastDisk;
}
public Set<String> getIpAddresses() {
return ipAddresses;
}
public Optional<String> getHardwareDivergence() {
return hardwareDivergence;
}
public Optional<String> getParentHostname() {
return parentHostname;
}
public NodeSpec build() {
return new NodeSpec(hostname, wantedDockerImage, currentDockerImage, nodeState, nodeType,
nodeFlavor, nodeCanonicalFlavor,
wantedVespaVersion, vespaVersion, allowedToBeDown, owner, membership,
wantedRestartGeneration, currentRestartGeneration,
wantedRebootGeneration, currentRebootGeneration,
minCpuCores, minMainMemoryAvailableGb, minDiskAvailableGb,
fastDisk, ipAddresses, hardwareDivergence, parentHostname);
}
} | class Builder {
private String hostname;
private Optional<DockerImage> wantedDockerImage = Optional.empty();
private Optional<DockerImage> currentDockerImage = Optional.empty();
private Node.State nodeState;
private NodeType nodeType;
private String nodeFlavor;
private String nodeCanonicalFlavor;
private Optional<String> wantedVespaVersion = Optional.empty();
private Optional<String> vespaVersion = Optional.empty();
private Optional<Boolean> allowedToBeDown = Optional.empty();
private Optional<Owner> owner = Optional.empty();
private Optional<Membership> membership = Optional.empty();
private Optional<Long> wantedRestartGeneration = Optional.empty();
private Optional<Long> currentRestartGeneration = Optional.empty();
private long wantedRebootGeneration;
private long currentRebootGeneration;
private double minCpuCores;
private double minMainMemoryAvailableGb;
private double minDiskAvailableGb;
private boolean fastDisk = false;
private Set<String> ipAddresses = Collections.emptySet();
private Optional<String> hardwareDivergence = Optional.empty();
private Optional<String> parentHostname = Optional.empty();
public Builder() {}
public Builder(NodeSpec node) {
hostname(node.hostname);
nodeState(node.nodeState);
nodeType(node.nodeType);
nodeFlavor(node.nodeFlavor);
nodeCanonicalFlavor(node.nodeCanonicalFlavor);
minCpuCores(node.minCpuCores);
minMainMemoryAvailableGb(node.minMainMemoryAvailableGb);
minDiskAvailableGb(node.minDiskAvailableGb);
fastDisk(node.fastDisk);
ipAddresses(node.ipAddresses);
wantedRebootGeneration(node.wantedRebootGeneration);
currentRebootGeneration(node.currentRebootGeneration);
node.wantedDockerImage.ifPresent(this::wantedDockerImage);
node.currentDockerImage.ifPresent(this::currentDockerImage);
node.wantedVespaVersion.ifPresent(this::wantedVespaVersion);
node.vespaVersion.ifPresent(this::vespaVersion);
node.allowedToBeDown.ifPresent(this::allowedToBeDown);
node.owner.ifPresent(this::owner);
node.membership.ifPresent(this::membership);
node.wantedRestartGeneration.ifPresent(this::wantedRestartGeneration);
node.currentRestartGeneration.ifPresent(this::currentRestartGeneration);
node.hardwareDivergence.ifPresent(this::hardwareDivergence);
node.parentHostname.ifPresent(this::parentHostname);
}
public Builder hostname(String hostname) {
this.hostname = hostname;
return this;
}
public Builder wantedDockerImage(DockerImage wantedDockerImage) {
this.wantedDockerImage = Optional.of(wantedDockerImage);
return this;
}
public Builder currentDockerImage(DockerImage currentDockerImage) {
this.currentDockerImage = Optional.of(currentDockerImage);
return this;
}
public Builder nodeState(Node.State nodeState) {
this.nodeState = nodeState;
return this;
}
public Builder nodeType(NodeType nodeType) {
this.nodeType = nodeType;
return this;
}
public Builder nodeFlavor(String nodeFlavor) {
this.nodeFlavor = nodeFlavor;
return this;
}
public Builder nodeCanonicalFlavor(String nodeCanonicalFlavor) {
this.nodeCanonicalFlavor = nodeCanonicalFlavor;
return this;
}
public Builder wantedVespaVersion(String wantedVespaVersion) {
this.wantedVespaVersion = Optional.of(wantedVespaVersion);
return this;
}
public Builder vespaVersion(String vespaVersion) {
this.vespaVersion = Optional.of(vespaVersion);
return this;
}
public Builder allowedToBeDown(boolean allowedToBeDown) {
this.allowedToBeDown = Optional.of(allowedToBeDown);
return this;
}
public Builder owner(Owner owner) {
this.owner = Optional.of(owner);
return this;
}
public Builder membership(Membership membership) {
this.membership = Optional.of(membership);
return this;
}
public Builder wantedRestartGeneration(long wantedRestartGeneration) {
this.wantedRestartGeneration = Optional.of(wantedRestartGeneration);
return this;
}
public Builder currentRestartGeneration(long currentRestartGeneration) {
this.currentRestartGeneration = Optional.of(currentRestartGeneration);
return this;
}
public Builder wantedRebootGeneration(long wantedRebootGeneration) {
this.wantedRebootGeneration = wantedRebootGeneration;
return this;
}
public Builder currentRebootGeneration(long currentRebootGeneration) {
this.currentRebootGeneration = currentRebootGeneration;
return this;
}
public Builder minCpuCores(double minCpuCores) {
this.minCpuCores = minCpuCores;
return this;
}
public Builder minMainMemoryAvailableGb(double minMainMemoryAvailableGb) {
this.minMainMemoryAvailableGb = minMainMemoryAvailableGb;
return this;
}
public Builder minDiskAvailableGb(double minDiskAvailableGb) {
this.minDiskAvailableGb = minDiskAvailableGb;
return this;
}
public Builder fastDisk(boolean fastDisk) {
this.fastDisk = fastDisk;
return this;
}
public Builder ipAddresses(Set<String> ipAddresses) {
this.ipAddresses = ipAddresses;
return this;
}
public Builder hardwareDivergence(String hardwareDivergence) {
this.hardwareDivergence = Optional.of(hardwareDivergence);
return this;
}
public Builder parentHostname(String parentHostname) {
this.parentHostname = Optional.of(parentHostname);
return this;
}
public String getHostname() {
return hostname;
}
public Optional<DockerImage> getWantedDockerImage() {
return wantedDockerImage;
}
public Optional<DockerImage> getCurrentDockerImage() {
return currentDockerImage;
}
public Node.State getNodeState() {
return nodeState;
}
public NodeType getNodeType() {
return nodeType;
}
public String getNodeFlavor() {
return nodeFlavor;
}
public String getNodeCanonicalFlavor() {
return nodeCanonicalFlavor;
}
public Optional<String> getWantedVespaVersion() {
return wantedVespaVersion;
}
public Optional<String> getVespaVersion() {
return vespaVersion;
}
public Optional<Boolean> getAllowedToBeDown() {
return allowedToBeDown;
}
public Optional<Owner> getOwner() {
return owner;
}
public Optional<Membership> getMembership() {
return membership;
}
public Optional<Long> getWantedRestartGeneration() {
return wantedRestartGeneration;
}
public Optional<Long> getCurrentRestartGeneration() {
return currentRestartGeneration;
}
public long getWantedRebootGeneration() {
return wantedRebootGeneration;
}
public long getCurrentRebootGeneration() {
return currentRebootGeneration;
}
public double getMinCpuCores() {
return minCpuCores;
}
public double getMinMainMemoryAvailableGb() {
return minMainMemoryAvailableGb;
}
public double getMinDiskAvailableGb() {
return minDiskAvailableGb;
}
public boolean isFastDisk() {
return fastDisk;
}
public Set<String> getIpAddresses() {
return ipAddresses;
}
public Optional<String> getHardwareDivergence() {
return hardwareDivergence;
}
public Optional<String> getParentHostname() {
return parentHostname;
}
public NodeSpec build() {
return new NodeSpec(hostname, wantedDockerImage, currentDockerImage, nodeState, nodeType,
nodeFlavor, nodeCanonicalFlavor,
wantedVespaVersion, vespaVersion, allowedToBeDown, owner, membership,
wantedRestartGeneration, currentRestartGeneration,
wantedRebootGeneration, currentRebootGeneration,
minCpuCores, minMainMemoryAvailableGb, minDiskAvailableGb,
fastDisk, ipAddresses, hardwareDivergence, parentHostname);
}
} |
Fixed in PR 5696 | public Builder updateFromNodeAttributes(NodeAttributes attributes) {
if (attributes.getDockerImage() != null) {
currentDockerImage = Optional.of(attributes.getDockerImage());
}
if (attributes.getHardwareDivergence() != null) {
hardwareDivergence = Optional.of(attributes.getHardwareDivergence());
}
if (attributes.getRebootGeneration() != null) {
currentRebootGeneration = attributes.getRebootGeneration();
}
if (attributes.getRestartGeneration() != null) {
currentRestartGeneration = Optional.of(attributes.getRestartGeneration());
}
return this;
} | if (attributes.getDockerImage() != null) { | public Builder updateFromNodeAttributes(NodeAttributes attributes) {
if (attributes.getDockerImage() != null) {
currentDockerImage = Optional.of(attributes.getDockerImage());
}
if (attributes.getHardwareDivergence() != null) {
hardwareDivergence = Optional.of(attributes.getHardwareDivergence());
}
if (attributes.getRebootGeneration() != null) {
currentRebootGeneration = attributes.getRebootGeneration();
}
if (attributes.getRestartGeneration() != null) {
currentRestartGeneration = Optional.of(attributes.getRestartGeneration());
}
return this;
} | class Builder {
private String hostname;
private Optional<DockerImage> wantedDockerImage = Optional.empty();
private Optional<DockerImage> currentDockerImage = Optional.empty();
private Node.State nodeState;
private NodeType nodeType;
private String nodeFlavor;
private String nodeCanonicalFlavor;
private Optional<String> wantedVespaVersion = Optional.empty();
private Optional<String> vespaVersion = Optional.empty();
private Optional<Boolean> allowedToBeDown = Optional.empty();
private Optional<Owner> owner = Optional.empty();
private Optional<Membership> membership = Optional.empty();
private Optional<Long> wantedRestartGeneration = Optional.empty();
private Optional<Long> currentRestartGeneration = Optional.empty();
private long wantedRebootGeneration;
private long currentRebootGeneration;
private double minCpuCores;
private double minMainMemoryAvailableGb;
private double minDiskAvailableGb;
private boolean fastDisk = false;
private Set<String> ipAddresses = Collections.emptySet();
private Optional<String> hardwareDivergence = Optional.empty();
private Optional<String> parentHostname = Optional.empty();
public Builder() {}
public Builder(NodeSpec node) {
hostname(node.hostname);
nodeState(node.nodeState);
nodeType(node.nodeType);
nodeFlavor(node.nodeFlavor);
nodeCanonicalFlavor(node.nodeCanonicalFlavor);
minCpuCores(node.minCpuCores);
minMainMemoryAvailableGb(node.minMainMemoryAvailableGb);
minDiskAvailableGb(node.minDiskAvailableGb);
fastDisk(node.fastDisk);
ipAddresses(node.ipAddresses);
wantedRebootGeneration(node.wantedRebootGeneration);
currentRebootGeneration(node.currentRebootGeneration);
node.wantedDockerImage.ifPresent(this::wantedDockerImage);
node.currentDockerImage.ifPresent(this::currentDockerImage);
node.wantedVespaVersion.ifPresent(this::wantedVespaVersion);
node.vespaVersion.ifPresent(this::vespaVersion);
node.allowedToBeDown.ifPresent(this::allowedToBeDown);
node.owner.ifPresent(this::owner);
node.membership.ifPresent(this::membership);
node.wantedRestartGeneration.ifPresent(this::wantedRestartGeneration);
node.currentRestartGeneration.ifPresent(this::currentRestartGeneration);
node.hardwareDivergence.ifPresent(this::hardwareDivergence);
node.parentHostname.ifPresent(this::parentHostname);
}
public Builder hostname(String hostname) {
this.hostname = hostname;
return this;
}
public Builder wantedDockerImage(DockerImage wantedDockerImage) {
this.wantedDockerImage = Optional.of(wantedDockerImage);
return this;
}
public Builder currentDockerImage(DockerImage currentDockerImage) {
this.currentDockerImage = Optional.of(currentDockerImage);
return this;
}
public Builder nodeState(Node.State nodeState) {
this.nodeState = nodeState;
return this;
}
public Builder nodeType(NodeType nodeType) {
this.nodeType = nodeType;
return this;
}
public Builder nodeFlavor(String nodeFlavor) {
this.nodeFlavor = nodeFlavor;
return this;
}
public Builder nodeCanonicalFlavor(String nodeCanonicalFlavor) {
this.nodeCanonicalFlavor = nodeCanonicalFlavor;
return this;
}
public Builder wantedVespaVersion(String wantedVespaVersion) {
this.wantedVespaVersion = Optional.of(wantedVespaVersion);
return this;
}
public Builder vespaVersion(String vespaVersion) {
this.vespaVersion = Optional.of(vespaVersion);
return this;
}
public Builder allowedToBeDown(boolean allowedToBeDown) {
this.allowedToBeDown = Optional.of(allowedToBeDown);
return this;
}
public Builder owner(Owner owner) {
this.owner = Optional.of(owner);
return this;
}
public Builder membership(Membership membership) {
this.membership = Optional.of(membership);
return this;
}
public Builder wantedRestartGeneration(long wantedRestartGeneration) {
this.wantedRestartGeneration = Optional.of(wantedRestartGeneration);
return this;
}
public Builder currentRestartGeneration(long currentRestartGeneration) {
this.currentRestartGeneration = Optional.of(currentRestartGeneration);
return this;
}
public Builder wantedRebootGeneration(long wantedRebootGeneration) {
this.wantedRebootGeneration = wantedRebootGeneration;
return this;
}
public Builder currentRebootGeneration(long currentRebootGeneration) {
this.currentRebootGeneration = currentRebootGeneration;
return this;
}
public Builder minCpuCores(double minCpuCores) {
this.minCpuCores = minCpuCores;
return this;
}
public Builder minMainMemoryAvailableGb(double minMainMemoryAvailableGb) {
this.minMainMemoryAvailableGb = minMainMemoryAvailableGb;
return this;
}
public Builder minDiskAvailableGb(double minDiskAvailableGb) {
this.minDiskAvailableGb = minDiskAvailableGb;
return this;
}
public Builder fastDisk(boolean fastDisk) {
this.fastDisk = fastDisk;
return this;
}
public Builder ipAddresses(Set<String> ipAddresses) {
this.ipAddresses = ipAddresses;
return this;
}
public Builder hardwareDivergence(String hardwareDivergence) {
this.hardwareDivergence = Optional.of(hardwareDivergence);
return this;
}
public Builder parentHostname(String parentHostname) {
this.parentHostname = Optional.of(parentHostname);
return this;
}
public String getHostname() {
return hostname;
}
public Optional<DockerImage> getWantedDockerImage() {
return wantedDockerImage;
}
public Optional<DockerImage> getCurrentDockerImage() {
return currentDockerImage;
}
public Node.State getNodeState() {
return nodeState;
}
public NodeType getNodeType() {
return nodeType;
}
public String getNodeFlavor() {
return nodeFlavor;
}
public String getNodeCanonicalFlavor() {
return nodeCanonicalFlavor;
}
public Optional<String> getWantedVespaVersion() {
return wantedVespaVersion;
}
public Optional<String> getVespaVersion() {
return vespaVersion;
}
public Optional<Boolean> getAllowedToBeDown() {
return allowedToBeDown;
}
public Optional<Owner> getOwner() {
return owner;
}
public Optional<Membership> getMembership() {
return membership;
}
public Optional<Long> getWantedRestartGeneration() {
return wantedRestartGeneration;
}
public Optional<Long> getCurrentRestartGeneration() {
return currentRestartGeneration;
}
public long getWantedRebootGeneration() {
return wantedRebootGeneration;
}
public long getCurrentRebootGeneration() {
return currentRebootGeneration;
}
public double getMinCpuCores() {
return minCpuCores;
}
public double getMinMainMemoryAvailableGb() {
return minMainMemoryAvailableGb;
}
public double getMinDiskAvailableGb() {
return minDiskAvailableGb;
}
public boolean isFastDisk() {
return fastDisk;
}
public Set<String> getIpAddresses() {
return ipAddresses;
}
public Optional<String> getHardwareDivergence() {
return hardwareDivergence;
}
public Optional<String> getParentHostname() {
return parentHostname;
}
public NodeSpec build() {
return new NodeSpec(hostname, wantedDockerImage, currentDockerImage, nodeState, nodeType,
nodeFlavor, nodeCanonicalFlavor,
wantedVespaVersion, vespaVersion, allowedToBeDown, owner, membership,
wantedRestartGeneration, currentRestartGeneration,
wantedRebootGeneration, currentRebootGeneration,
minCpuCores, minMainMemoryAvailableGb, minDiskAvailableGb,
fastDisk, ipAddresses, hardwareDivergence, parentHostname);
}
} | class Builder {
private String hostname;
private Optional<DockerImage> wantedDockerImage = Optional.empty();
private Optional<DockerImage> currentDockerImage = Optional.empty();
private Node.State nodeState;
private NodeType nodeType;
private String nodeFlavor;
private String nodeCanonicalFlavor;
private Optional<String> wantedVespaVersion = Optional.empty();
private Optional<String> vespaVersion = Optional.empty();
private Optional<Boolean> allowedToBeDown = Optional.empty();
private Optional<Owner> owner = Optional.empty();
private Optional<Membership> membership = Optional.empty();
private Optional<Long> wantedRestartGeneration = Optional.empty();
private Optional<Long> currentRestartGeneration = Optional.empty();
private long wantedRebootGeneration;
private long currentRebootGeneration;
private double minCpuCores;
private double minMainMemoryAvailableGb;
private double minDiskAvailableGb;
private boolean fastDisk = false;
private Set<String> ipAddresses = Collections.emptySet();
private Optional<String> hardwareDivergence = Optional.empty();
private Optional<String> parentHostname = Optional.empty();
public Builder() {}
public Builder(NodeSpec node) {
hostname(node.hostname);
nodeState(node.nodeState);
nodeType(node.nodeType);
nodeFlavor(node.nodeFlavor);
nodeCanonicalFlavor(node.nodeCanonicalFlavor);
minCpuCores(node.minCpuCores);
minMainMemoryAvailableGb(node.minMainMemoryAvailableGb);
minDiskAvailableGb(node.minDiskAvailableGb);
fastDisk(node.fastDisk);
ipAddresses(node.ipAddresses);
wantedRebootGeneration(node.wantedRebootGeneration);
currentRebootGeneration(node.currentRebootGeneration);
node.wantedDockerImage.ifPresent(this::wantedDockerImage);
node.currentDockerImage.ifPresent(this::currentDockerImage);
node.wantedVespaVersion.ifPresent(this::wantedVespaVersion);
node.vespaVersion.ifPresent(this::vespaVersion);
node.allowedToBeDown.ifPresent(this::allowedToBeDown);
node.owner.ifPresent(this::owner);
node.membership.ifPresent(this::membership);
node.wantedRestartGeneration.ifPresent(this::wantedRestartGeneration);
node.currentRestartGeneration.ifPresent(this::currentRestartGeneration);
node.hardwareDivergence.ifPresent(this::hardwareDivergence);
node.parentHostname.ifPresent(this::parentHostname);
}
public Builder hostname(String hostname) {
this.hostname = hostname;
return this;
}
public Builder wantedDockerImage(DockerImage wantedDockerImage) {
this.wantedDockerImage = Optional.of(wantedDockerImage);
return this;
}
public Builder currentDockerImage(DockerImage currentDockerImage) {
this.currentDockerImage = Optional.of(currentDockerImage);
return this;
}
public Builder nodeState(Node.State nodeState) {
this.nodeState = nodeState;
return this;
}
public Builder nodeType(NodeType nodeType) {
this.nodeType = nodeType;
return this;
}
public Builder nodeFlavor(String nodeFlavor) {
this.nodeFlavor = nodeFlavor;
return this;
}
public Builder nodeCanonicalFlavor(String nodeCanonicalFlavor) {
this.nodeCanonicalFlavor = nodeCanonicalFlavor;
return this;
}
public Builder wantedVespaVersion(String wantedVespaVersion) {
this.wantedVespaVersion = Optional.of(wantedVespaVersion);
return this;
}
public Builder vespaVersion(String vespaVersion) {
this.vespaVersion = Optional.of(vespaVersion);
return this;
}
public Builder allowedToBeDown(boolean allowedToBeDown) {
this.allowedToBeDown = Optional.of(allowedToBeDown);
return this;
}
public Builder owner(Owner owner) {
this.owner = Optional.of(owner);
return this;
}
public Builder membership(Membership membership) {
this.membership = Optional.of(membership);
return this;
}
public Builder wantedRestartGeneration(long wantedRestartGeneration) {
this.wantedRestartGeneration = Optional.of(wantedRestartGeneration);
return this;
}
public Builder currentRestartGeneration(long currentRestartGeneration) {
this.currentRestartGeneration = Optional.of(currentRestartGeneration);
return this;
}
public Builder wantedRebootGeneration(long wantedRebootGeneration) {
this.wantedRebootGeneration = wantedRebootGeneration;
return this;
}
public Builder currentRebootGeneration(long currentRebootGeneration) {
this.currentRebootGeneration = currentRebootGeneration;
return this;
}
public Builder minCpuCores(double minCpuCores) {
this.minCpuCores = minCpuCores;
return this;
}
public Builder minMainMemoryAvailableGb(double minMainMemoryAvailableGb) {
this.minMainMemoryAvailableGb = minMainMemoryAvailableGb;
return this;
}
public Builder minDiskAvailableGb(double minDiskAvailableGb) {
this.minDiskAvailableGb = minDiskAvailableGb;
return this;
}
public Builder fastDisk(boolean fastDisk) {
this.fastDisk = fastDisk;
return this;
}
public Builder ipAddresses(Set<String> ipAddresses) {
this.ipAddresses = ipAddresses;
return this;
}
public Builder hardwareDivergence(String hardwareDivergence) {
this.hardwareDivergence = Optional.of(hardwareDivergence);
return this;
}
public Builder parentHostname(String parentHostname) {
this.parentHostname = Optional.of(parentHostname);
return this;
}
public String getHostname() {
return hostname;
}
public Optional<DockerImage> getWantedDockerImage() {
return wantedDockerImage;
}
public Optional<DockerImage> getCurrentDockerImage() {
return currentDockerImage;
}
public Node.State getNodeState() {
return nodeState;
}
public NodeType getNodeType() {
return nodeType;
}
public String getNodeFlavor() {
return nodeFlavor;
}
public String getNodeCanonicalFlavor() {
return nodeCanonicalFlavor;
}
public Optional<String> getWantedVespaVersion() {
return wantedVespaVersion;
}
public Optional<String> getVespaVersion() {
return vespaVersion;
}
public Optional<Boolean> getAllowedToBeDown() {
return allowedToBeDown;
}
public Optional<Owner> getOwner() {
return owner;
}
public Optional<Membership> getMembership() {
return membership;
}
public Optional<Long> getWantedRestartGeneration() {
return wantedRestartGeneration;
}
public Optional<Long> getCurrentRestartGeneration() {
return currentRestartGeneration;
}
public long getWantedRebootGeneration() {
return wantedRebootGeneration;
}
public long getCurrentRebootGeneration() {
return currentRebootGeneration;
}
public double getMinCpuCores() {
return minCpuCores;
}
public double getMinMainMemoryAvailableGb() {
return minMainMemoryAvailableGb;
}
public double getMinDiskAvailableGb() {
return minDiskAvailableGb;
}
public boolean isFastDisk() {
return fastDisk;
}
public Set<String> getIpAddresses() {
return ipAddresses;
}
public Optional<String> getHardwareDivergence() {
return hardwareDivergence;
}
public Optional<String> getParentHostname() {
return parentHostname;
}
public NodeSpec build() {
return new NodeSpec(hostname, wantedDockerImage, currentDockerImage, nodeState, nodeType,
nodeFlavor, nodeCanonicalFlavor,
wantedVespaVersion, vespaVersion, allowedToBeDown, owner, membership,
wantedRestartGeneration, currentRestartGeneration,
wantedRebootGeneration, currentRebootGeneration,
minCpuCores, minMainMemoryAvailableGb, minDiskAvailableGb,
fastDisk, ipAddresses, hardwareDivergence, parentHostname);
}
} |
This won't match "cd-us-east-1a" | public ProxyResponse handle(ProxyRequest proxyRequest) throws ProxyException {
if (proxyRequest.isDiscoveryRequest()) {
return createDiscoveryResponse(proxyRequest);
}
ZoneId zoneId = ZoneId.from(proxyRequest.getEnvironment(), proxyRequest.getRegion());
StringBuilder errorBuilder = new StringBuilder();
if (zoneId.region().value().startsWith("aws-") || zoneId.region().value().startsWith("cd-aws-")) {
URI uri = zoneRegistry.getConfigServerVipUri(zoneId)
.orElseThrow(() -> new RuntimeException("Could not find a config server VIP for " + zoneId));
Optional<ProxyResponse> proxyResponse = proxyCall(uri, proxyRequest, errorBuilder);
if (proxyResponse.isPresent()) {
return proxyResponse.get();
}
} else {
List<URI> allServers = new ArrayList<>(zoneRegistry.getConfigServerUris(zoneId));
if (queueFirstServerIfDown(allServers, proxyRequest)) {
errorBuilder.append("Change ordering due to failed ping.");
}
for (URI uri : allServers) {
Optional<ProxyResponse> proxyResponse = proxyCall(uri, proxyRequest, errorBuilder);
if (proxyResponse.isPresent()) {
return proxyResponse.get();
}
}
}
throw new ProxyException(ErrorResponse.internalServerError("Failed talking to config servers: "
+ errorBuilder.toString()));
} | if (zoneId.region().value().startsWith("aws-") || zoneId.region().value().startsWith("cd-aws-")) { | public ProxyResponse handle(ProxyRequest proxyRequest) throws ProxyException {
if (proxyRequest.isDiscoveryRequest()) {
return createDiscoveryResponse(proxyRequest);
}
ZoneId zoneId = ZoneId.from(proxyRequest.getEnvironment(), proxyRequest.getRegion());
StringBuilder errorBuilder = new StringBuilder();
if (zoneId.region().value().startsWith("aws-")
|| zoneId.region().value().startsWith("cd-aws-")
|| zoneId.region().value().equals("cd-us-east-1a")) {
URI uri = zoneRegistry.getConfigServerVipUri(zoneId)
.orElseThrow(() -> new RuntimeException("Could not find a config server VIP for " + zoneId));
Optional<ProxyResponse> proxyResponse = proxyCall(uri, proxyRequest, errorBuilder);
if (proxyResponse.isPresent()) {
return proxyResponse.get();
}
} else {
List<URI> allServers = new ArrayList<>(zoneRegistry.getConfigServerUris(zoneId));
if (queueFirstServerIfDown(allServers, proxyRequest)) {
errorBuilder.append("Change ordering due to failed ping.");
}
for (URI uri : allServers) {
Optional<ProxyResponse> proxyResponse = proxyCall(uri, proxyRequest, errorBuilder);
if (proxyResponse.isPresent()) {
return proxyResponse.get();
}
}
}
throw new ProxyException(ErrorResponse.internalServerError("Failed talking to config servers: "
+ errorBuilder.toString()));
} | class ConfigServerRestExecutorImpl implements ConfigServerRestExecutor {
private static final Logger log = Logger.getLogger(ConfigServerRestExecutorImpl.class.getName());
private static final Duration PROXY_REQUEST_TIMEOUT = Duration.ofSeconds(10);
private static final Set<String> HEADERS_TO_COPY = new HashSet<>(Arrays.asList("X-HTTP-Method-Override", "Content-Type"));
private final ZoneRegistry zoneRegistry;
private final AthenzSslContextProvider sslContextProvider;
@Inject
public ConfigServerRestExecutorImpl(ZoneRegistry zoneRegistry, AthenzSslContextProvider sslContextProvider) {
this.zoneRegistry = zoneRegistry;
this.sslContextProvider = sslContextProvider;
}
@Override
private static class DiscoveryResponseStructure {
List<String> uris = new ArrayList<>();
}
private ProxyResponse createDiscoveryResponse(ProxyRequest proxyRequest) {
ObjectMapper mapper = new ObjectMapper();
DiscoveryResponseStructure responseStructure = new DiscoveryResponseStructure();
String environmentName = proxyRequest.getEnvironment();
ZoneList zones = zoneRegistry.zones().all();
if ( ! environmentName.isEmpty())
zones = zones.in(Environment.from(environmentName));
for (ZoneId zoneId : zones.ids()) {
responseStructure.uris.add(proxyRequest.getScheme() + ":
zoneId.environment().name() + "/" + zoneId.region().value());
}
JsonNode node = mapper.valueToTree(responseStructure);
return new ProxyResponse(proxyRequest, node.toString(), 200, Optional.empty(), "application/json");
}
private static String removeFirstSlashIfAny(String url) {
if (url.startsWith("/")) {
return url.substring(1);
}
return url;
}
private Optional<ProxyResponse> proxyCall(URI uri, ProxyRequest proxyRequest, StringBuilder errorBuilder)
throws ProxyException {
String fullUri = uri.toString() + removeFirstSlashIfAny(proxyRequest.getConfigServerRequest());
final HttpRequestBase requestBase = createHttpBaseRequest(proxyRequest.getMethod(), fullUri, proxyRequest.getData());
copyHeaders(proxyRequest.getHeaders(), requestBase);
RequestConfig config = RequestConfig.custom()
.setConnectTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis())
.setConnectionRequestTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis())
.setSocketTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis()).build();
try (CloseableHttpClient client = createHttpClient(config, sslContextProvider, zoneRegistry, proxyRequest);
CloseableHttpResponse response = client.execute(requestBase)) {
String content = getContent(response);
int status = response.getStatusLine().getStatusCode();
if (status / 100 == 5) {
errorBuilder.append("Talking to server ").append(uri.getHost())
.append(", got ").append(status).append(" ").append(content).append("\n");
log.log(LogLevel.DEBUG, () -> String.format("Got response from %s with status code %d and content:\n %s",
uri.getHost(), status, content));
return Optional.empty();
}
final Header contentHeader = response.getLastHeader("Content-Type");
final String contentType;
if (contentHeader != null && contentHeader.getValue() != null && ! contentHeader.getValue().isEmpty()) {
contentType = contentHeader.getValue().replace("; charset=UTF-8","");
} else {
contentType = "application/json";
}
return Optional.of(new ProxyResponse(proxyRequest, content, status, Optional.of(uri), contentType));
} catch (Exception e) {
errorBuilder.append("Talking to server ").append(uri.getHost()).append(" got exception ").append(e.getMessage());
log.log(LogLevel.DEBUG, e, () -> "Got exception while sending request to " + uri.getHost());
return Optional.empty();
}
}
private static String getContent(CloseableHttpResponse response) {
return Optional.ofNullable(response.getEntity())
.map(entity ->
{
try {
return EntityUtils.toString(entity);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
).orElse("");
}
private static HttpRequestBase createHttpBaseRequest(String method, String uri, InputStream data) throws ProxyException {
Method enumMethod = Method.valueOf(method);
switch (enumMethod) {
case GET:
return new HttpGet(uri);
case POST:
HttpPost post = new HttpPost(uri);
if (data != null) {
post.setEntity(new InputStreamEntity(data));
}
return post;
case PUT:
HttpPut put = new HttpPut(uri);
if (data != null) {
put.setEntity(new InputStreamEntity(data));
}
return put;
case DELETE:
return new HttpDelete(uri);
case PATCH:
HttpPatch patch = new HttpPatch(uri);
if (data != null) {
patch.setEntity(new InputStreamEntity(data));
}
return patch;
default:
throw new ProxyException(ErrorResponse.methodNotAllowed("Will not proxy such calls."));
}
}
private static void copyHeaders(Map<String, List<String>> headers, HttpRequestBase toRequest) {
for (Map.Entry<String, List<String>> headerEntry : headers.entrySet()) {
if (HEADERS_TO_COPY.contains(headerEntry.getKey())) {
for (String value : headerEntry.getValue()) {
toRequest.addHeader(headerEntry.getKey(), value);
}
}
}
}
/**
* During upgrade, one server can be down, this is normal. Therefor we do a quick ping on the first server,
* if it is not responding, we try the other servers first. False positive/negatives are not critical,
* but will increase latency to some extent.
*/
private boolean queueFirstServerIfDown(List<URI> allServers, ProxyRequest proxyRequest) {
if (allServers.size() < 2) { return false; }
URI uri = allServers.get(0);
HttpGet httpget = new HttpGet(uri);
int timeout = 500;
RequestConfig config = RequestConfig.custom()
.setConnectTimeout(timeout)
.setConnectionRequestTimeout(timeout)
.setSocketTimeout(timeout).build();
try (CloseableHttpClient client = createHttpClient(config, sslContextProvider, zoneRegistry, proxyRequest);
CloseableHttpResponse response = client.execute(httpget)) {
if (response.getStatusLine().getStatusCode() == 200) {
return false;
}
} catch (IOException e) {
}
allServers.remove(0);
allServers.add(uri);
return true;
}
private static CloseableHttpClient createHttpClient(RequestConfig config,
AthenzSslContextProvider sslContextProvider,
ZoneRegistry zoneRegistry,
ProxyRequest proxyRequest) {
AthenzIdentityVerifier hostnameVerifier =
new AthenzIdentityVerifier(
singleton(
zoneRegistry.getConfigServerAthenzService(
ZoneId.from(proxyRequest.getEnvironment(), proxyRequest.getRegion()))));
return HttpClientBuilder.create()
.setUserAgent("config-server-proxy-client")
.setSslcontext(sslContextProvider.get())
.setHostnameVerifier(new AthenzIdentityVerifierAdapter(hostnameVerifier))
.setDefaultRequestConfig(config)
.build();
}
private static class AthenzIdentityVerifierAdapter implements X509HostnameVerifier {
private final AthenzIdentityVerifier verifier;
AthenzIdentityVerifierAdapter(AthenzIdentityVerifier verifier) {
this.verifier = verifier;
}
@Override
public boolean verify(String hostname, SSLSession sslSession) {
return verifier.verify(hostname, sslSession);
}
@Override
public void verify(String host, SSLSocket ssl) { /* All sockets accepted */}
@Override
public void verify(String hostname, X509Certificate certificate) throws SSLException {
AthenzIdentity identity = AthenzIdentities.from(certificate);
if (!verifier.isTrusted(identity)) {
throw new SSLException("Athenz identity is not trusted: " + identity.getFullName());
}
}
@Override
public void verify(String hostname, String[] cns, String[] subjectAlts) throws SSLException {
AthenzIdentity identity = AthenzIdentities.from(cns[0]);
if (!verifier.isTrusted(identity)) {
throw new SSLException("Athenz identity is not trusted: " + identity.getFullName());
}
}
}
} | class ConfigServerRestExecutorImpl implements ConfigServerRestExecutor {
private static final Logger log = Logger.getLogger(ConfigServerRestExecutorImpl.class.getName());
private static final Duration PROXY_REQUEST_TIMEOUT = Duration.ofSeconds(10);
private static final Set<String> HEADERS_TO_COPY = new HashSet<>(Arrays.asList("X-HTTP-Method-Override", "Content-Type"));
private final ZoneRegistry zoneRegistry;
private final AthenzSslContextProvider sslContextProvider;
@Inject
public ConfigServerRestExecutorImpl(ZoneRegistry zoneRegistry, AthenzSslContextProvider sslContextProvider) {
this.zoneRegistry = zoneRegistry;
this.sslContextProvider = sslContextProvider;
}
@Override
private static class DiscoveryResponseStructure {
List<String> uris = new ArrayList<>();
}
private ProxyResponse createDiscoveryResponse(ProxyRequest proxyRequest) {
ObjectMapper mapper = new ObjectMapper();
DiscoveryResponseStructure responseStructure = new DiscoveryResponseStructure();
String environmentName = proxyRequest.getEnvironment();
ZoneList zones = zoneRegistry.zones().all();
if ( ! environmentName.isEmpty())
zones = zones.in(Environment.from(environmentName));
for (ZoneId zoneId : zones.ids()) {
responseStructure.uris.add(proxyRequest.getScheme() + ":
zoneId.environment().name() + "/" + zoneId.region().value());
}
JsonNode node = mapper.valueToTree(responseStructure);
return new ProxyResponse(proxyRequest, node.toString(), 200, Optional.empty(), "application/json");
}
private static String removeFirstSlashIfAny(String url) {
if (url.startsWith("/")) {
return url.substring(1);
}
return url;
}
private Optional<ProxyResponse> proxyCall(URI uri, ProxyRequest proxyRequest, StringBuilder errorBuilder)
throws ProxyException {
String fullUri = uri.toString() + removeFirstSlashIfAny(proxyRequest.getConfigServerRequest());
final HttpRequestBase requestBase = createHttpBaseRequest(proxyRequest.getMethod(), fullUri, proxyRequest.getData());
copyHeaders(proxyRequest.getHeaders(), requestBase);
RequestConfig config = RequestConfig.custom()
.setConnectTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis())
.setConnectionRequestTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis())
.setSocketTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis()).build();
try (CloseableHttpClient client = createHttpClient(config, sslContextProvider, zoneRegistry, proxyRequest);
CloseableHttpResponse response = client.execute(requestBase)) {
String content = getContent(response);
int status = response.getStatusLine().getStatusCode();
if (status / 100 == 5) {
errorBuilder.append("Talking to server ").append(uri.getHost())
.append(", got ").append(status).append(" ").append(content).append("\n");
log.log(LogLevel.DEBUG, () -> String.format("Got response from %s with status code %d and content:\n %s",
uri.getHost(), status, content));
return Optional.empty();
}
final Header contentHeader = response.getLastHeader("Content-Type");
final String contentType;
if (contentHeader != null && contentHeader.getValue() != null && ! contentHeader.getValue().isEmpty()) {
contentType = contentHeader.getValue().replace("; charset=UTF-8","");
} else {
contentType = "application/json";
}
return Optional.of(new ProxyResponse(proxyRequest, content, status, Optional.of(uri), contentType));
} catch (Exception e) {
errorBuilder.append("Talking to server ").append(uri.getHost()).append(" got exception ").append(e.getMessage());
log.log(LogLevel.DEBUG, e, () -> "Got exception while sending request to " + uri.getHost());
return Optional.empty();
}
}
private static String getContent(CloseableHttpResponse response) {
return Optional.ofNullable(response.getEntity())
.map(entity ->
{
try {
return EntityUtils.toString(entity);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
).orElse("");
}
private static HttpRequestBase createHttpBaseRequest(String method, String uri, InputStream data) throws ProxyException {
Method enumMethod = Method.valueOf(method);
switch (enumMethod) {
case GET:
return new HttpGet(uri);
case POST:
HttpPost post = new HttpPost(uri);
if (data != null) {
post.setEntity(new InputStreamEntity(data));
}
return post;
case PUT:
HttpPut put = new HttpPut(uri);
if (data != null) {
put.setEntity(new InputStreamEntity(data));
}
return put;
case DELETE:
return new HttpDelete(uri);
case PATCH:
HttpPatch patch = new HttpPatch(uri);
if (data != null) {
patch.setEntity(new InputStreamEntity(data));
}
return patch;
default:
throw new ProxyException(ErrorResponse.methodNotAllowed("Will not proxy such calls."));
}
}
private static void copyHeaders(Map<String, List<String>> headers, HttpRequestBase toRequest) {
for (Map.Entry<String, List<String>> headerEntry : headers.entrySet()) {
if (HEADERS_TO_COPY.contains(headerEntry.getKey())) {
for (String value : headerEntry.getValue()) {
toRequest.addHeader(headerEntry.getKey(), value);
}
}
}
}
/**
* During upgrade, one server can be down, this is normal. Therefor we do a quick ping on the first server,
* if it is not responding, we try the other servers first. False positive/negatives are not critical,
* but will increase latency to some extent.
*/
private boolean queueFirstServerIfDown(List<URI> allServers, ProxyRequest proxyRequest) {
if (allServers.size() < 2) { return false; }
URI uri = allServers.get(0);
HttpGet httpget = new HttpGet(uri);
int timeout = 500;
RequestConfig config = RequestConfig.custom()
.setConnectTimeout(timeout)
.setConnectionRequestTimeout(timeout)
.setSocketTimeout(timeout).build();
try (CloseableHttpClient client = createHttpClient(config, sslContextProvider, zoneRegistry, proxyRequest);
CloseableHttpResponse response = client.execute(httpget)) {
if (response.getStatusLine().getStatusCode() == 200) {
return false;
}
} catch (IOException e) {
}
allServers.remove(0);
allServers.add(uri);
return true;
}
private static CloseableHttpClient createHttpClient(RequestConfig config,
AthenzSslContextProvider sslContextProvider,
ZoneRegistry zoneRegistry,
ProxyRequest proxyRequest) {
AthenzIdentityVerifier hostnameVerifier =
new AthenzIdentityVerifier(
singleton(
zoneRegistry.getConfigServerAthenzService(
ZoneId.from(proxyRequest.getEnvironment(), proxyRequest.getRegion()))));
return HttpClientBuilder.create()
.setUserAgent("config-server-proxy-client")
.setSslcontext(sslContextProvider.get())
.setHostnameVerifier(new AthenzIdentityVerifierAdapter(hostnameVerifier))
.setDefaultRequestConfig(config)
.build();
}
private static class AthenzIdentityVerifierAdapter implements X509HostnameVerifier {
private final AthenzIdentityVerifier verifier;
AthenzIdentityVerifierAdapter(AthenzIdentityVerifier verifier) {
this.verifier = verifier;
}
@Override
public boolean verify(String hostname, SSLSession sslSession) {
return verifier.verify(hostname, sslSession);
}
@Override
public void verify(String host, SSLSocket ssl) { /* All sockets accepted */}
@Override
public void verify(String hostname, X509Certificate certificate) throws SSLException {
AthenzIdentity identity = AthenzIdentities.from(certificate);
if (!verifier.isTrusted(identity)) {
throw new SSLException("Athenz identity is not trusted: " + identity.getFullName());
}
}
@Override
public void verify(String hostname, String[] cns, String[] subjectAlts) throws SSLException {
AthenzIdentity identity = AthenzIdentities.from(cns[0]);
if (!verifier.isTrusted(identity)) {
throw new SSLException("Athenz identity is not trusted: " + identity.getFullName());
}
}
}
} |
Ah, ok I got it. Thanks! | public DeploymentInstanceSpec instance(InstanceName name) {
for (Step step : steps) {
if ( ! (step instanceof DeploymentInstanceSpec)) continue;
DeploymentInstanceSpec instanceStep = (DeploymentInstanceSpec)step;
if (instanceStep.name().equals(name))
return instanceStep;
}
return null;
} | for (Step step : steps) { | public DeploymentInstanceSpec instance(InstanceName name) {
for (DeploymentInstanceSpec instance : instances()) {
if (instance.name().equals(name))
return instance;
}
return null;
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
Optional.empty(),
Collections.emptyList(),
Collections.emptyList(),
"<deployment version='1.0'/>",
Optional.empty(),
Optional.empty(),
Notifications.none(),
List.of());
private final List<Step> steps;
private final Optional<Integer> majorVersion;
private final String xmlForm;
public DeploymentSpec(List<Step> steps,
Optional<Integer> majorVersion,
String xmlForm) {
if (singleInstance(steps)) {
var singleInstance = (DeploymentInstanceSpec)steps.get(0);
this.steps = List.of(singleInstance.withSteps(completeSteps(singleInstance.steps())));
}
else {
this.steps = List.copyOf(completeSteps(steps));
}
this.majorVersion = majorVersion;
this.xmlForm = xmlForm;
validateTotalDelay(steps);
}
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, Optional<Integer> majorVersion,
List<ChangeBlocker> changeBlockers, List<Step> steps, String xmlForm,
Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService,
Notifications notifications,
List<Endpoint> endpoints) {
this(List.of(new DeploymentInstanceSpec(InstanceName.from("default"),
steps,
upgradePolicy,
changeBlockers,
globalServiceId,
athenzDomain,
athenzService,
notifications,
endpoints)),
majorVersion,
xmlForm);
}
/** Adds missing required steps and reorders steps to a permissible order */
private static List<DeploymentSpec.Step> completeSteps(List<DeploymentSpec.Step> inputSteps) {
List<Step> steps = new ArrayList<>(inputSteps);
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.prod)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.staging))) {
steps.add(new DeploymentSpec.DeclaredZone(Environment.staging));
}
if (steps.stream().anyMatch(step -> step.deploysTo(Environment.staging)) &&
steps.stream().noneMatch(step -> step.deploysTo(Environment.test))) {
steps.add(new DeploymentSpec.DeclaredZone(Environment.test));
}
DeploymentSpec.DeclaredZone testStep = remove(Environment.test, steps);
if (testStep != null)
steps.add(0, testStep);
DeploymentSpec.DeclaredZone stagingStep = remove(Environment.staging, steps);
if (stagingStep != null)
steps.add(1, stagingStep);
return steps;
}
/**
* Removes the first occurrence of a deployment step to the given environment and returns it.
*
* @return the removed step, or null if it is not present
*/
private static DeploymentSpec.DeclaredZone remove(Environment environment, List<DeploymentSpec.Step> steps) {
for (int i = 0; i < steps.size(); i++) {
if ( ! (steps.get(i) instanceof DeploymentSpec.DeclaredZone)) continue;
DeploymentSpec.DeclaredZone zoneStep = (DeploymentSpec.DeclaredZone)steps.get(i);
if (zoneStep.environment() == environment) {
steps.remove(i);
return zoneStep;
}
}
return null;
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private void validateTotalDelay(List<Step> steps) {
long totalDelaySeconds = steps.stream().mapToLong(step -> (step.delay().getSeconds())).sum();
if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
" but max 24 hours is allowed");
}
private DeploymentInstanceSpec defaultInstance() {
if (singleInstance(steps)) return (DeploymentInstanceSpec)steps.get(0);
throw new IllegalArgumentException("This deployment spec does not support the legacy API " +
"as it has multiple instances: " +
instances().stream().map(Step::toString).collect(Collectors.joining(",")));
}
public Optional<String> globalServiceId() { return defaultInstance().globalServiceId(); }
public UpgradePolicy upgradePolicy() { return defaultInstance().upgradePolicy(); }
/** Returns the major version this application is pinned to, or empty (default) to allow all major versions */
public Optional<Integer> majorVersion() { return majorVersion; }
public boolean canUpgradeAt(Instant instant) { return defaultInstance().canUpgradeAt(instant); }
public boolean canChangeRevisionAt(Instant instant) { return defaultInstance().canChangeRevisionAt(instant); }
public List<ChangeBlocker> changeBlocker() { return defaultInstance().changeBlocker(); }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() {
if (singleInstance(steps)) return defaultInstance().steps();
return steps;
}
public List<DeclaredZone> zones() {
return defaultInstance().steps().stream()
.flatMap(step -> step.zones().stream())
.collect(Collectors.toList());
}
public Optional<AthenzDomain> athenzDomain() { return defaultInstance().athenzDomain(); }
public Optional<AthenzService> athenzService(Environment environment, RegionName region) {
return defaultInstance().athenzService(environment, region);
}
public Notifications notifications() { return defaultInstance().notifications(); }
public List<Endpoint> endpoints() { return defaultInstance().endpoints(); }
/** Returns the XML form of this spec, or null if it was not created by fromXml, nor is empty */
public String xmlForm() { return xmlForm; }
public boolean includes(Environment environment, Optional<RegionName> region) {
return defaultInstance().deploysTo(environment, region);
}
private static boolean singleInstance(List<DeploymentSpec.Step> steps) {
return steps.size() == 1 && steps.get(0) instanceof DeploymentInstanceSpec;
}
/** Returns the instance step containing the given instance name, or null if not present */
public DeploymentInstanceSpec instance(String name) {
return instance(InstanceName.from(name));
}
/** Returns the instance step containing the given instance name, or null if not present */
/** Returns the instance step containing the given instance name, or throws an IllegalArgumentException if not present */
public DeploymentInstanceSpec requireInstance(String name) {
return requireInstance(InstanceName.from(name));
}
public DeploymentInstanceSpec requireInstance(InstanceName name) {
DeploymentInstanceSpec instance = instance(name);
if (instance == null)
throw new IllegalArgumentException("No instance '" + name + "' in deployment.xml'. Instances: " +
instances().stream().map(spec -> spec.name().toString()).collect(Collectors.joining(",")));
return instance;
}
/** Returns the steps of this which are instances */
public List<DeploymentInstanceSpec> instances() {
return steps.stream()
.filter(step -> step instanceof DeploymentInstanceSpec).map(DeploymentInstanceSpec.class::cast)
.collect(Collectors.toList());
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
return new DeploymentSpecXmlReader().read(reader);
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(String xmlForm) {
return fromXml(xmlForm, true);
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(String xmlForm, boolean validate) {
return new DeploymentSpecXmlReader(validate).read(xmlForm);
}
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
String message;
for (; t != null; t = t.getCause()) {
message = t.getMessage();
if (message == null) continue;
if (message.equals(lastMessage)) continue;
if (b.length() > 0) {
b.append(": ");
}
b.append(message);
lastMessage = message;
}
return b.toString();
}
@Override
public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    DeploymentSpec other = (DeploymentSpec) o;
    // Objects.equals guards against a null xmlForm, which xmlForm() documents as
    // possible for specs not created by fromXml; plain xmlForm.equals(...) would NPE there.
    return Objects.equals(majorVersion, other.majorVersion) &&
           Objects.equals(steps, other.steps) &&
           Objects.equals(xmlForm, other.xmlForm);
}

@Override
public int hashCode() {
    // Objects.hash is null-tolerant, so this stays consistent with equals above.
    return Objects.hash(majorVersion, steps, xmlForm);
}
/** This may be invoked by a continuous build */
public static void main(String[] args) {
    // Exit-code contract: 0 when the given environment (and optional region) is included
    // in the deployment spec; 1 on mismatch, bad usage, or any read/parse failure.
    if (args.length != 2 && args.length != 3) {
        System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
                           "Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
        System.exit(1);
    }
    // NOTE(review): FileReader uses the platform default charset — presumably the spec
    // file is ASCII/UTF-8; confirm before relying on non-ASCII content.
    try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
        DeploymentSpec spec = DeploymentSpec.fromXml(reader);
        Environment environment = Environment.from(args[1]);
        Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
        if (spec.includes(environment, region))
            System.exit(0);
        else
            System.exit(1);
    }
    catch (Exception e) {
        // Broad catch is intentional at this process boundary: any failure maps to exit code 1.
        System.err.println("Exception checking deployment spec: " + toMessageString(e));
        System.exit(1);
    }
}
/** A deployment step */
public abstract static class Step {

    /** Returns whether this step deploys to the given environment, in any region */
    public final boolean deploysTo(Environment environment) {
        return deploysTo(environment, Optional.empty());
    }

    /** Returns whether this step deploys to the given environment, and (if specified) region */
    public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);

    /** Returns the zones deployed to in this step. This default implementation returns an empty list. */
    public List<DeclaredZone> zones() { return Collections.emptyList(); }

    /** The delay introduced by this step (beyond the time it takes to execute the step). Default is zero. */
    public Duration delay() { return Duration.ZERO; }

    /** Returns all the steps nested in this. This default implementation returns an empty list. */
    public List<Step> steps() { return List.of(); }

}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {

    private final Duration duration;

    public Delay(Duration duration) {
        this.duration = duration;
    }

    /** The configured wait time; same value as {@link #delay()} for this step type */
    public Duration duration() { return duration; }

    @Override
    public Duration delay() { return duration; }

    /** A delay never deploys anywhere */
    @Override
    public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }

    @Override
    public String toString() {
        return "delay " + duration;
    }

}
/** A deployment step which is to run deployment in a particular zone */
public static class DeclaredZone extends Step {

    private final Environment environment;
    private final Optional<RegionName> region; // present iff environment == prod (enforced in the full constructor)
    private final boolean active;
    private final Optional<AthenzService> athenzService;
    private final Optional<String> testerFlavor;

    /** Creates a non-prod zone step with no region and no production traffic */
    public DeclaredZone(Environment environment) {
        this(environment, Optional.empty(), false);
    }

    public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
        this(environment, region, active, Optional.empty(), Optional.empty());
    }

    public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active, Optional<AthenzService> athenzService) {
        this(environment, region, active, athenzService, Optional.empty());
    }

    /** Canonical constructor: all other constructors chain to this, which validates the environment/region pairing */
    public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active,
                        Optional<AthenzService> athenzService, Optional<String> testerFlavor) {
        if (environment != Environment.prod && region.isPresent())
            throw new IllegalArgumentException("Non-prod environments cannot specify a region");
        if (environment == Environment.prod && region.isEmpty())
            throw new IllegalArgumentException("Prod environments must be specified with a region");
        this.environment = environment;
        this.region = region;
        this.active = active;
        this.athenzService = athenzService;
        this.testerFlavor = testerFlavor;
    }

    public Environment environment() { return environment; }

    /** The region name, or empty if not declared */
    public Optional<RegionName> region() { return region; }

    /** Returns whether this zone should receive production traffic */
    public boolean active() { return active; }

    public Optional<String> testerFlavor() { return testerFlavor; }

    public Optional<AthenzService> athenzService() { return athenzService; }

    @Override
    public List<DeclaredZone> zones() { return Collections.singletonList(this); }

    @Override
    public boolean deploysTo(Environment environment, Optional<RegionName> region) {
        if (environment != this.environment) return false;
        // An unspecified region matches any region of this environment
        if (region.isPresent() && ! region.equals(this.region)) return false;
        return true;
    }

    // Note: only environment and region participate in equality/hashing — active,
    // athenzService and testerFlavor are presumably configuration rather than identity.
    @Override
    public int hashCode() {
        return Objects.hash(environment, region);
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) return true;
        if ( ! (o instanceof DeclaredZone)) return false;
        DeclaredZone other = (DeclaredZone)o;
        if (this.environment != other.environment) return false;
        if ( ! this.region.equals(other.region())) return false;
        return true;
    }

    @Override
    public String toString() {
        return environment + (region.map(regionName -> "." + regionName).orElse(""));
    }

}
/** A deployment step which is to run multiple steps (zones or instances) in parallel */
public static class ParallelZones extends Step {

    private final List<Step> steps;

    public ParallelZones(List<Step> steps) {
        this.steps = List.copyOf(steps); // immutable defensive snapshot
    }

    /** Returns only the nested steps which are zones */
    @Override
    public List<DeclaredZone> zones() {
        return steps.stream()
                    .filter(DeclaredZone.class::isInstance)
                    .map(DeclaredZone.class::cast)
                    .collect(Collectors.toList());
    }

    /** Returns all the steps nested in this */
    @Override
    public List<Step> steps() { return steps; }

    /** True if any nested step deploys to the given environment (and region, if specified) */
    @Override
    public boolean deploysTo(Environment environment, Optional<RegionName> region) {
        for (Step step : steps())
            if (step.deploysTo(environment, region)) return true;
        return false;
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) return true;
        if ( ! (o instanceof ParallelZones)) return false;
        return Objects.equals(steps, ((ParallelZones) o).steps);
    }

    @Override
    public int hashCode() {
        return Objects.hash(steps);
    }

    @Override
    public String toString() {
        return steps.size() + " parallel steps";
    }

}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
    /** Canary: Applications with this policy will upgrade before any other */
    canary,
    /** Default: Will upgrade after all canary applications upgraded successfully. The default. */
    defaultPolicy,
    /** Will upgrade after most default applications upgraded successfully */
    conservative
    // NOTE(review): declaration order mirrors upgrade order (canary first) — confirm before
    // relying on ordinal comparisons elsewhere.
}
/** A blocking of changes in a given time window */
public static class ChangeBlocker {

    private final boolean revision;
    private final boolean version;
    private final TimeWindow window;

    public ChangeBlocker(boolean revision, boolean version, TimeWindow window) {
        this.revision = revision;
        this.version = version;
        this.window = window;
    }

    /** Whether application revision (code) changes are blocked while the window is active */
    public boolean blocksRevisions() { return revision; }

    /** Whether platform (Vespa) version changes are blocked while the window is active */
    public boolean blocksVersions() { return version; }

    /** The time window in which this blocker applies */
    public TimeWindow window() { return window; }

    @Override
    public String toString() {
        return "change blocker revision=" + revision + " version=" + version + " window=" + window;
    }

}
} | class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
UpgradePolicy.defaultPolicy,
Optional.empty(),
Collections.emptyList(),
Collections.emptyList(),
"<deployment version='1.0'/>",
Optional.empty(),
Optional.empty(),
Notifications.none(),
List.of());
private final List<Step> steps;
private final Optional<Integer> majorVersion;
private final String xmlForm;
/**
 * Creates a spec from a list of top-level steps.
 *
 * When the steps are exactly one instance spec, the implicit test/staging steps are
 * completed inside that instance; otherwise they are completed at the top level.
 */
public DeploymentSpec(List<Step> steps,
                      Optional<Integer> majorVersion,
                      String xmlForm) {
    if (singleInstance(steps)) {
        // Complete the steps inside the single instance and re-wrap it
        var singleInstance = (DeploymentInstanceSpec)steps.get(0);
        this.steps = List.of(singleInstance.withSteps(completeSteps(singleInstance.steps())));
    }
    else {
        this.steps = List.copyOf(completeSteps(steps));
    }
    this.majorVersion = majorVersion;
    this.xmlForm = xmlForm;
    // Validates the steps as passed in, not this.steps; completeSteps only adds
    // zone steps with zero delay, so the total delay is the same either way.
    validateTotalDelay(steps);
}
/**
 * Legacy constructor: wraps the given per-application settings in an implicit
 * single instance named "default". Pure delegation — argument order is critical here.
 */
public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, Optional<Integer> majorVersion,
                      List<ChangeBlocker> changeBlockers, List<Step> steps, String xmlForm,
                      Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService,
                      Notifications notifications,
                      List<Endpoint> endpoints) {
    this(List.of(new DeploymentInstanceSpec(InstanceName.from("default"),
                                            steps,
                                            upgradePolicy,
                                            changeBlockers,
                                            globalServiceId,
                                            athenzDomain,
                                            athenzService,
                                            notifications,
                                            endpoints)),
         majorVersion,
         xmlForm);
}
/** Adds missing required steps and reorders steps to a permissible order */
private static List<DeploymentSpec.Step> completeSteps(List<DeploymentSpec.Step> inputSteps) {
    List<Step> completed = new ArrayList<>(inputSteps);

    // Prod requires a staging run, and staging requires a system test. The staging
    // flag is updated after the first addition on purpose: prod-only input gains both.
    boolean hasStaging = completed.stream().anyMatch(step -> step.deploysTo(Environment.staging));
    if ( ! hasStaging && completed.stream().anyMatch(step -> step.deploysTo(Environment.prod))) {
        completed.add(new DeploymentSpec.DeclaredZone(Environment.staging));
        hasStaging = true;
    }
    if (hasStaging && completed.stream().noneMatch(step -> step.deploysTo(Environment.test)))
        completed.add(new DeploymentSpec.DeclaredZone(Environment.test));

    // Test always runs first, then staging, regardless of where they were declared
    DeploymentSpec.DeclaredZone testStep = remove(Environment.test, completed);
    if (testStep != null)
        completed.add(0, testStep);
    DeploymentSpec.DeclaredZone stagingStep = remove(Environment.staging, completed);
    if (stagingStep != null)
        completed.add(1, stagingStep);

    return completed;
}
/**
 * Removes the first occurrence of a deployment step to the given environment and returns it.
 *
 * @return the removed step, or null if it is not present
 */
private static DeploymentSpec.DeclaredZone remove(Environment environment, List<DeploymentSpec.Step> steps) {
    for (int i = 0; i < steps.size(); i++) {
        DeploymentSpec.Step candidate = steps.get(i);
        if ( ! (candidate instanceof DeploymentSpec.DeclaredZone)) continue;
        DeploymentSpec.DeclaredZone zone = (DeploymentSpec.DeclaredZone) candidate;
        if (zone.environment() != environment) continue;
        steps.remove(i);
        return zone;
    }
    return null;
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
private void validateTotalDelay(List<Step> steps) {
    long totalDelaySeconds = 0;
    for (Step step : steps)
        totalDelaySeconds += step.delay().getSeconds();
    if (totalDelaySeconds > Duration.ofHours(24).getSeconds())
        throw new IllegalArgumentException("The total delay specified is " + Duration.ofSeconds(totalDelaySeconds) +
                                           " but max 24 hours is allowed");
}
private DeploymentInstanceSpec defaultInstance() {
if (singleInstance(steps)) return (DeploymentInstanceSpec)steps.get(0);
throw new IllegalArgumentException("This deployment spec does not support the legacy API " +
"as it has multiple instances: " +
instances().stream().map(Step::toString).collect(Collectors.joining(",")));
}
public Optional<String> globalServiceId() { return defaultInstance().globalServiceId(); }
public UpgradePolicy upgradePolicy() { return defaultInstance().upgradePolicy(); }
/** Returns the major version this application is pinned to, or empty (default) to allow all major versions */
public Optional<Integer> majorVersion() { return majorVersion; }
public boolean canUpgradeAt(Instant instant) { return defaultInstance().canUpgradeAt(instant); }
public boolean canChangeRevisionAt(Instant instant) { return defaultInstance().canChangeRevisionAt(instant); }
public List<ChangeBlocker> changeBlocker() { return defaultInstance().changeBlocker(); }
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() {
if (singleInstance(steps)) return defaultInstance().steps();
return steps;
}
public List<DeclaredZone> zones() {
return defaultInstance().steps().stream()
.flatMap(step -> step.zones().stream())
.collect(Collectors.toList());
}
public Optional<AthenzDomain> athenzDomain() { return defaultInstance().athenzDomain(); }
public Optional<AthenzService> athenzService(Environment environment, RegionName region) {
return defaultInstance().athenzService(environment, region);
}
public Notifications notifications() { return defaultInstance().notifications(); }
public List<Endpoint> endpoints() { return defaultInstance().endpoints(); }
/** Returns the XML form of this spec, or null if it was not created by fromXml, nor is empty */
public String xmlForm() { return xmlForm; }
public boolean includes(Environment environment, Optional<RegionName> region) {
return defaultInstance().deploysTo(environment, region);
}
private static boolean singleInstance(List<DeploymentSpec.Step> steps) {
return steps.size() == 1 && steps.get(0) instanceof DeploymentInstanceSpec;
}
/** Returns the instance step containing the given instance name, or null if not present */
public DeploymentInstanceSpec instance(String name) {
    return instance(InstanceName.from(name));
}

/** Returns the instance step containing the given instance name, or throws an IllegalArgumentException if not present */
public DeploymentInstanceSpec requireInstance(String name) {
    return requireInstance(InstanceName.from(name));
}

/** Returns the instance step containing the given instance name, or throws an IllegalArgumentException if not present */
public DeploymentInstanceSpec requireInstance(InstanceName name) {
    DeploymentInstanceSpec instance = instance(name);
    if (instance == null)
        throw new IllegalArgumentException("No instance '" + name + "' in deployment.xml'. Instances: " +
                                           instances().stream().map(spec -> spec.name().toString()).collect(Collectors.joining(",")));
    return instance;
}

/** Returns the steps of this which are instances */
public List<DeploymentInstanceSpec> instances() {
    return steps.stream()
                .filter(step -> step instanceof DeploymentInstanceSpec).map(DeploymentInstanceSpec.class::cast)
                .collect(Collectors.toList());
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
return new DeploymentSpecXmlReader().read(reader);
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(String xmlForm) {
return fromXml(xmlForm, true);
}
/**
* Creates a deployment spec from XML.
*
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(String xmlForm, boolean validate) {
return new DeploymentSpecXmlReader(validate).read(xmlForm);
}
/**
 * Flattens a throwable and its cause chain into one human-readable string.
 * Null messages are skipped, a message equal to the most recently appended
 * one is not repeated, and messages are separated by ": ".
 */
public static String toMessageString(Throwable t) {
    List<String> messages = new ArrayList<>();
    String lastAppended = null;
    for (Throwable current = t; current != null; current = current.getCause()) {
        String message = current.getMessage();
        if (message == null || message.equals(lastAppended)) continue;
        messages.add(message);
        lastAppended = message;
    }
    return String.join(": ", messages);
}
@Override
public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    DeploymentSpec other = (DeploymentSpec) o;
    // Objects.equals guards against a null xmlForm, which xmlForm() documents as
    // possible for specs not created by fromXml; plain xmlForm.equals(...) would NPE there.
    return Objects.equals(majorVersion, other.majorVersion) &&
           Objects.equals(steps, other.steps) &&
           Objects.equals(xmlForm, other.xmlForm);
}

@Override
public int hashCode() {
    // Objects.hash is null-tolerant, so this stays consistent with equals above.
    return Objects.hash(majorVersion, steps, xmlForm);
}
/** This may be invoked by a continuous build */
public static void main(String[] args) {
if (args.length != 2 && args.length != 3) {
System.err.println("Usage: DeploymentSpec [file] [environment] [region]?" +
"Returns 0 if the specified zone matches the deployment spec, 1 otherwise");
System.exit(1);
}
try (BufferedReader reader = new BufferedReader(new FileReader(args[0]))) {
DeploymentSpec spec = DeploymentSpec.fromXml(reader);
Environment environment = Environment.from(args[1]);
Optional<RegionName> region = args.length == 3 ? Optional.of(RegionName.from(args[2])) : Optional.empty();
if (spec.includes(environment, region))
System.exit(0);
else
System.exit(1);
}
catch (Exception e) {
System.err.println("Exception checking deployment spec: " + toMessageString(e));
System.exit(1);
}
}
/** A deployment step */
public abstract static class Step {
/** Returns whether this step deploys to the given region */
public final boolean deploysTo(Environment environment) {
return deploysTo(environment, Optional.empty());
}
/** Returns whether this step deploys to the given environment, and (if specified) region */
public abstract boolean deploysTo(Environment environment, Optional<RegionName> region);
/** Returns the zones deployed to in this step */
public List<DeclaredZone> zones() { return Collections.emptyList(); }
/** The delay introduced by this step (beyond the time it takes to execute the step). Default is zero. */
public Duration delay() { return Duration.ZERO; }
/** Returns all the steps nested in this. This default implementatiino returns an empty list. */
public List<Step> steps() { return List.of(); }
}
/** A deployment step which is to wait for some time before progressing to the next step */
public static class Delay extends Step {
private final Duration duration;
public Delay(Duration duration) {
this.duration = duration;
}
public Duration duration() { return duration; }
@Override
public Duration delay() { return duration; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) { return false; }
@Override
public String toString() {
return "delay " + duration;
}
}
/** A deployment step which is to run deployment in a particular zone */
public static class DeclaredZone extends Step {
private final Environment environment;
private final Optional<RegionName> region;
private final boolean active;
private final Optional<AthenzService> athenzService;
private final Optional<String> testerFlavor;
public DeclaredZone(Environment environment) {
this(environment, Optional.empty(), false);
}
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active) {
this(environment, region, active, Optional.empty(), Optional.empty());
}
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active, Optional<AthenzService> athenzService) {
this(environment, region, active, athenzService, Optional.empty());
}
public DeclaredZone(Environment environment, Optional<RegionName> region, boolean active,
Optional<AthenzService> athenzService, Optional<String> testerFlavor) {
if (environment != Environment.prod && region.isPresent())
throw new IllegalArgumentException("Non-prod environments cannot specify a region");
if (environment == Environment.prod && region.isEmpty())
throw new IllegalArgumentException("Prod environments must be specified with a region");
this.environment = environment;
this.region = region;
this.active = active;
this.athenzService = athenzService;
this.testerFlavor = testerFlavor;
}
public Environment environment() { return environment; }
/** The region name, or empty if not declared */
public Optional<RegionName> region() { return region; }
/** Returns whether this zone should receive production traffic */
public boolean active() { return active; }
public Optional<String> testerFlavor() { return testerFlavor; }
public Optional<AthenzService> athenzService() { return athenzService; }
@Override
public List<DeclaredZone> zones() { return Collections.singletonList(this); }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
if (environment != this.environment) return false;
if (region.isPresent() && ! region.equals(this.region)) return false;
return true;
}
@Override
public int hashCode() {
return Objects.hash(environment, region);
}
@Override
public boolean equals(Object o) {
if (o == this) return true;
if ( ! (o instanceof DeclaredZone)) return false;
DeclaredZone other = (DeclaredZone)o;
if (this.environment != other.environment) return false;
if ( ! this.region.equals(other.region())) return false;
return true;
}
@Override
public String toString() {
return environment + (region.map(regionName -> "." + regionName).orElse(""));
}
}
/** A deployment step which is to run multiple steps (zones or instances) in parallel */
public static class ParallelZones extends Step {
private final List<Step> steps;
public ParallelZones(List<Step> steps) {
this.steps = List.copyOf(steps);
}
/** Returns the steps inside this which are zones */
@Override
public List<DeclaredZone> zones() {
return this.steps.stream()
.filter(step -> step instanceof DeclaredZone)
.map(DeclaredZone.class::cast)
.collect(Collectors.toList());
}
/** Returns all the steps nested in this */
@Override
public List<Step> steps() { return steps; }
@Override
public boolean deploysTo(Environment environment, Optional<RegionName> region) {
return steps().stream().anyMatch(zone -> zone.deploysTo(environment, region));
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof ParallelZones)) return false;
ParallelZones that = (ParallelZones) o;
return Objects.equals(steps, that.steps);
}
@Override
public int hashCode() {
return Objects.hash(steps);
}
@Override
public String toString() {
return steps.size() + " parallel steps";
}
}
/** Controls when this application will be upgraded to new Vespa versions */
public enum UpgradePolicy {
/** Canary: Applications with this policy will upgrade before any other */
canary,
/** Default: Will upgrade after all canary applications upgraded successfully. The default. */
defaultPolicy,
/** Will upgrade after most default applications upgraded successfully */
conservative
}
/** A blocking of changes in a given time window */
public static class ChangeBlocker {
private final boolean revision;
private final boolean version;
private final TimeWindow window;
public ChangeBlocker(boolean revision, boolean version, TimeWindow window) {
this.revision = revision;
this.version = version;
this.window = window;
}
public boolean blocksRevisions() { return revision; }
public boolean blocksVersions() { return version; }
public TimeWindow window() { return window; }
@Override
public String toString() {
return "change blocker revision=" + revision + " version=" + version + " window=" + window;
}
}
} |
CD zones was renamed to cd-aws-us-east-1a (but that change was reverted, so you are right). I'll add it. | public ProxyResponse handle(ProxyRequest proxyRequest) throws ProxyException {
if (proxyRequest.isDiscoveryRequest()) {
    return createDiscoveryResponse(proxyRequest);
}
ZoneId zoneId = ZoneId.from(proxyRequest.getEnvironment(), proxyRequest.getRegion());
StringBuilder errorBuilder = new StringBuilder();
// AWS zones are reached through a single config server VIP instead of per-server URIs.
// cd-us-east-1a is also an AWS zone but kept its pre-rename name, so it is matched
// explicitly (see the reviewer note above: the cd-aws- rename was reverted).
if (zoneId.region().value().startsWith("aws-")
    || zoneId.region().value().startsWith("cd-aws-")
    || zoneId.region().value().equals("cd-us-east-1a")) {
    URI uri = zoneRegistry.getConfigServerVipUri(zoneId)
            .orElseThrow(() -> new RuntimeException("Could not find a config server VIP for " + zoneId));
    Optional<ProxyResponse> proxyResponse = proxyCall(uri, proxyRequest, errorBuilder);
    if (proxyResponse.isPresent()) {
        return proxyResponse.get();
    }
} else {
    // Talk to each config server directly, demoting an unresponsive first server.
    List<URI> allServers = new ArrayList<>(zoneRegistry.getConfigServerUris(zoneId));
    if (queueFirstServerIfDown(allServers, proxyRequest)) {
        errorBuilder.append("Change ordering due to failed ping.");
    }
    for (URI uri : allServers) {
        Optional<ProxyResponse> proxyResponse = proxyCall(uri, proxyRequest, errorBuilder);
        if (proxyResponse.isPresent()) {
            return proxyResponse.get();
        }
    }
}
// Every server failed; surface the accumulated per-server errors.
throw new ProxyException(ErrorResponse.internalServerError("Failed talking to config servers: "
                                                           + errorBuilder.toString()));
} | if (zoneId.region().value().startsWith("aws-") || zoneId.region().value().startsWith("cd-aws-")) { | public ProxyResponse handle(ProxyRequest proxyRequest) throws ProxyException {
if (proxyRequest.isDiscoveryRequest()) {
    return createDiscoveryResponse(proxyRequest);
}
ZoneId zoneId = ZoneId.from(proxyRequest.getEnvironment(), proxyRequest.getRegion());
StringBuilder errorBuilder = new StringBuilder();
// AWS zones go through a single config server VIP; cd-us-east-1a is an AWS zone
// that kept its pre-rename name, hence the explicit equals check.
if (zoneId.region().value().startsWith("aws-")
    || zoneId.region().value().startsWith("cd-aws-")
    || zoneId.region().value().equals("cd-us-east-1a")) {
    URI uri = zoneRegistry.getConfigServerVipUri(zoneId)
            .orElseThrow(() -> new RuntimeException("Could not find a config server VIP for " + zoneId));
    Optional<ProxyResponse> proxyResponse = proxyCall(uri, proxyRequest, errorBuilder);
    if (proxyResponse.isPresent()) {
        return proxyResponse.get();
    }
} else {
    // Talk to each config server directly, demoting an unresponsive first server.
    List<URI> allServers = new ArrayList<>(zoneRegistry.getConfigServerUris(zoneId));
    if (queueFirstServerIfDown(allServers, proxyRequest)) {
        errorBuilder.append("Change ordering due to failed ping.");
    }
    for (URI uri : allServers) {
        Optional<ProxyResponse> proxyResponse = proxyCall(uri, proxyRequest, errorBuilder);
        if (proxyResponse.isPresent()) {
            return proxyResponse.get();
        }
    }
}
// Every server failed; surface the accumulated per-server errors.
throw new ProxyException(ErrorResponse.internalServerError("Failed talking to config servers: "
                                                           + errorBuilder.toString()));
} | class ConfigServerRestExecutorImpl implements ConfigServerRestExecutor {
private static final Logger log = Logger.getLogger(ConfigServerRestExecutorImpl.class.getName());
private static final Duration PROXY_REQUEST_TIMEOUT = Duration.ofSeconds(10);
private static final Set<String> HEADERS_TO_COPY = new HashSet<>(Arrays.asList("X-HTTP-Method-Override", "Content-Type"));
private final ZoneRegistry zoneRegistry;
private final AthenzSslContextProvider sslContextProvider;
@Inject
public ConfigServerRestExecutorImpl(ZoneRegistry zoneRegistry, AthenzSslContextProvider sslContextProvider) {
this.zoneRegistry = zoneRegistry;
this.sslContextProvider = sslContextProvider;
}
@Override
private static class DiscoveryResponseStructure {
List<String> uris = new ArrayList<>();
}
private ProxyResponse createDiscoveryResponse(ProxyRequest proxyRequest) {
ObjectMapper mapper = new ObjectMapper();
DiscoveryResponseStructure responseStructure = new DiscoveryResponseStructure();
String environmentName = proxyRequest.getEnvironment();
ZoneList zones = zoneRegistry.zones().all();
if ( ! environmentName.isEmpty())
zones = zones.in(Environment.from(environmentName));
for (ZoneId zoneId : zones.ids()) {
responseStructure.uris.add(proxyRequest.getScheme() + ":
zoneId.environment().name() + "/" + zoneId.region().value());
}
JsonNode node = mapper.valueToTree(responseStructure);
return new ProxyResponse(proxyRequest, node.toString(), 200, Optional.empty(), "application/json");
}
/** Strips a single leading '/' from the given url, if present. */
private static String removeFirstSlashIfAny(String url) {
    return url.startsWith("/") ? url.substring(1) : url;
}
/**
 * Proxies the given request to one config server.
 *
 * @return the response, or empty when this server should be skipped (5xx reply or any
 *         exception) — in that case the failure is appended to errorBuilder so the
 *         caller can try the next server and report all failures at the end.
 */
private Optional<ProxyResponse> proxyCall(URI uri, ProxyRequest proxyRequest, StringBuilder errorBuilder)
        throws ProxyException {
    String fullUri = uri.toString() + removeFirstSlashIfAny(proxyRequest.getConfigServerRequest());
    final HttpRequestBase requestBase = createHttpBaseRequest(proxyRequest.getMethod(), fullUri, proxyRequest.getData());
    copyHeaders(proxyRequest.getHeaders(), requestBase);

    RequestConfig config = RequestConfig.custom()
            .setConnectTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis())
            .setConnectionRequestTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis())
            .setSocketTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis()).build();
    try (CloseableHttpClient client = createHttpClient(config, sslContextProvider, zoneRegistry, proxyRequest);
         CloseableHttpResponse response = client.execute(requestBase)) {
        String content = getContent(response);
        int status = response.getStatusLine().getStatusCode();
        if (status / 100 == 5) {
            // Server error: record it and let the caller fail over to another server
            errorBuilder.append("Talking to server ").append(uri.getHost())
                    .append(", got ").append(status).append(" ").append(content).append("\n");
            log.log(LogLevel.DEBUG, () -> String.format("Got response from %s with status code %d and content:\n %s",
                                                        uri.getHost(), status, content));
            return Optional.empty();
        }
        // Non-5xx (including 4xx) is returned to the client as-is, with the charset
        // suffix stripped from the content type; default to JSON when absent.
        final Header contentHeader = response.getLastHeader("Content-Type");
        final String contentType;
        if (contentHeader != null && contentHeader.getValue() != null && ! contentHeader.getValue().isEmpty()) {
            contentType = contentHeader.getValue().replace("; charset=UTF-8","");
        } else {
            contentType = "application/json";
        }
        return Optional.of(new ProxyResponse(proxyRequest, content, status, Optional.of(uri), contentType));
    } catch (Exception e) {
        // Treated as a per-server failure, not fatal: recorded for the aggregate error
        errorBuilder.append("Talking to server ").append(uri.getHost()).append(" got exception ").append(e.getMessage());
        log.log(LogLevel.DEBUG, e, () -> "Got exception while sending request to " + uri.getHost());
        return Optional.empty();
    }
}
/** Reads the response body as a string, returning "" when there is no entity. */
private static String getContent(CloseableHttpResponse response) {
    if (response.getEntity() == null) return "";
    try {
        return EntityUtils.toString(response.getEntity());
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/**
 * Builds the Apache request object for the given HTTP method, attaching the request
 * body (when present) for the entity-enclosing methods.
 *
 * @throws ProxyException (405) for methods this proxy will not forward
 */
private static HttpRequestBase createHttpBaseRequest(String method, String uri, InputStream data) throws ProxyException {
    // NOTE(review): Method.valueOf throws IllegalArgumentException for names outside the
    // enum — only recognized-but-unsupported methods reach the default arm. Confirm that
    // is the intended behavior for unknown method strings.
    Method enumMethod = Method.valueOf(method);
    switch (enumMethod) {
        case GET:
            return new HttpGet(uri);
        case POST:
            HttpPost post = new HttpPost(uri);
            if (data != null) {
                post.setEntity(new InputStreamEntity(data));
            }
            return post;
        case PUT:
            HttpPut put = new HttpPut(uri);
            if (data != null) {
                put.setEntity(new InputStreamEntity(data));
            }
            return put;
        case DELETE:
            return new HttpDelete(uri);
        case PATCH:
            HttpPatch patch = new HttpPatch(uri);
            if (data != null) {
                patch.setEntity(new InputStreamEntity(data));
            }
            return patch;
        default:
            throw new ProxyException(ErrorResponse.methodNotAllowed("Will not proxy such calls."));
    }
}
/** Copies the whitelisted headers (HEADERS_TO_COPY) from the incoming request onto the outgoing one. */
private static void copyHeaders(Map<String, List<String>> headers, HttpRequestBase toRequest) {
    headers.forEach((name, values) -> {
        if (HEADERS_TO_COPY.contains(name))
            values.forEach(value -> toRequest.addHeader(name, value));
    });
}
/**
 * During upgrade, one server can be down, this is normal. Therefore we do a quick ping on the first server,
 * if it is not responding, we try the other servers first. False positives/negatives are not critical,
 * but will increase latency to some extent.
 *
 * @return true if the first server was unresponsive and has been moved to the back of the list
 */
private boolean queueFirstServerIfDown(List<URI> allServers, ProxyRequest proxyRequest) {
    if (allServers.size() < 2) { return false; }

    URI uri = allServers.get(0);
    HttpGet httpget = new HttpGet(uri);

    // Short timeout: this is a best-effort probe, not a real request
    int timeout = 500;
    RequestConfig config = RequestConfig.custom()
            .setConnectTimeout(timeout)
            .setConnectionRequestTimeout(timeout)
            .setSocketTimeout(timeout).build();
    try (CloseableHttpClient client = createHttpClient(config, sslContextProvider, zoneRegistry, proxyRequest);
         CloseableHttpResponse response = client.execute(httpget)) {
        if (response.getStatusLine().getStatusCode() == 200) {
            return false;
        }
    } catch (IOException ignored) {
        // Deliberately swallowed: a failed ping simply demotes this server below
    }

    // Move the unresponsive server last so healthier ones are tried first
    allServers.remove(0);
    allServers.add(uri);
    return true;
}
private static CloseableHttpClient createHttpClient(RequestConfig config,
AthenzSslContextProvider sslContextProvider,
ZoneRegistry zoneRegistry,
ProxyRequest proxyRequest) {
AthenzIdentityVerifier hostnameVerifier =
new AthenzIdentityVerifier(
singleton(
zoneRegistry.getConfigServerAthenzService(
ZoneId.from(proxyRequest.getEnvironment(), proxyRequest.getRegion()))));
return HttpClientBuilder.create()
.setUserAgent("config-server-proxy-client")
.setSslcontext(sslContextProvider.get())
.setHostnameVerifier(new AthenzIdentityVerifierAdapter(hostnameVerifier))
.setDefaultRequestConfig(config)
.build();
}
private static class AthenzIdentityVerifierAdapter implements X509HostnameVerifier {
private final AthenzIdentityVerifier verifier;
AthenzIdentityVerifierAdapter(AthenzIdentityVerifier verifier) {
this.verifier = verifier;
}
@Override
public boolean verify(String hostname, SSLSession sslSession) {
return verifier.verify(hostname, sslSession);
}
@Override
public void verify(String host, SSLSocket ssl) { /* All sockets accepted */}
@Override
public void verify(String hostname, X509Certificate certificate) throws SSLException {
AthenzIdentity identity = AthenzIdentities.from(certificate);
if (!verifier.isTrusted(identity)) {
throw new SSLException("Athenz identity is not trusted: " + identity.getFullName());
}
}
@Override
public void verify(String hostname, String[] cns, String[] subjectAlts) throws SSLException {
AthenzIdentity identity = AthenzIdentities.from(cns[0]);
if (!verifier.isTrusted(identity)) {
throw new SSLException("Athenz identity is not trusted: " + identity.getFullName());
}
}
}
} | class ConfigServerRestExecutorImpl implements ConfigServerRestExecutor {
private static final Logger log = Logger.getLogger(ConfigServerRestExecutorImpl.class.getName());
private static final Duration PROXY_REQUEST_TIMEOUT = Duration.ofSeconds(10);
private static final Set<String> HEADERS_TO_COPY = new HashSet<>(Arrays.asList("X-HTTP-Method-Override", "Content-Type"));
private final ZoneRegistry zoneRegistry;
private final AthenzSslContextProvider sslContextProvider;
@Inject
public ConfigServerRestExecutorImpl(ZoneRegistry zoneRegistry, AthenzSslContextProvider sslContextProvider) {
this.zoneRegistry = zoneRegistry;
this.sslContextProvider = sslContextProvider;
}
@Override
private static class DiscoveryResponseStructure {
List<String> uris = new ArrayList<>();
}
private ProxyResponse createDiscoveryResponse(ProxyRequest proxyRequest) {
ObjectMapper mapper = new ObjectMapper();
DiscoveryResponseStructure responseStructure = new DiscoveryResponseStructure();
String environmentName = proxyRequest.getEnvironment();
ZoneList zones = zoneRegistry.zones().all();
if ( ! environmentName.isEmpty())
zones = zones.in(Environment.from(environmentName));
for (ZoneId zoneId : zones.ids()) {
responseStructure.uris.add(proxyRequest.getScheme() + ":
zoneId.environment().name() + "/" + zoneId.region().value());
}
JsonNode node = mapper.valueToTree(responseStructure);
return new ProxyResponse(proxyRequest, node.toString(), 200, Optional.empty(), "application/json");
}
private static String removeFirstSlashIfAny(String url) {
if (url.startsWith("/")) {
return url.substring(1);
}
return url;
}
private Optional<ProxyResponse> proxyCall(URI uri, ProxyRequest proxyRequest, StringBuilder errorBuilder)
throws ProxyException {
String fullUri = uri.toString() + removeFirstSlashIfAny(proxyRequest.getConfigServerRequest());
final HttpRequestBase requestBase = createHttpBaseRequest(proxyRequest.getMethod(), fullUri, proxyRequest.getData());
copyHeaders(proxyRequest.getHeaders(), requestBase);
RequestConfig config = RequestConfig.custom()
.setConnectTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis())
.setConnectionRequestTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis())
.setSocketTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis()).build();
try (CloseableHttpClient client = createHttpClient(config, sslContextProvider, zoneRegistry, proxyRequest);
CloseableHttpResponse response = client.execute(requestBase)) {
String content = getContent(response);
int status = response.getStatusLine().getStatusCode();
if (status / 100 == 5) {
errorBuilder.append("Talking to server ").append(uri.getHost())
.append(", got ").append(status).append(" ").append(content).append("\n");
log.log(LogLevel.DEBUG, () -> String.format("Got response from %s with status code %d and content:\n %s",
uri.getHost(), status, content));
return Optional.empty();
}
final Header contentHeader = response.getLastHeader("Content-Type");
final String contentType;
if (contentHeader != null && contentHeader.getValue() != null && ! contentHeader.getValue().isEmpty()) {
contentType = contentHeader.getValue().replace("; charset=UTF-8","");
} else {
contentType = "application/json";
}
return Optional.of(new ProxyResponse(proxyRequest, content, status, Optional.of(uri), contentType));
} catch (Exception e) {
errorBuilder.append("Talking to server ").append(uri.getHost()).append(" got exception ").append(e.getMessage());
log.log(LogLevel.DEBUG, e, () -> "Got exception while sending request to " + uri.getHost());
return Optional.empty();
}
}
private static String getContent(CloseableHttpResponse response) {
return Optional.ofNullable(response.getEntity())
.map(entity ->
{
try {
return EntityUtils.toString(entity);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
).orElse("");
}
private static HttpRequestBase createHttpBaseRequest(String method, String uri, InputStream data) throws ProxyException {
Method enumMethod = Method.valueOf(method);
switch (enumMethod) {
case GET:
return new HttpGet(uri);
case POST:
HttpPost post = new HttpPost(uri);
if (data != null) {
post.setEntity(new InputStreamEntity(data));
}
return post;
case PUT:
HttpPut put = new HttpPut(uri);
if (data != null) {
put.setEntity(new InputStreamEntity(data));
}
return put;
case DELETE:
return new HttpDelete(uri);
case PATCH:
HttpPatch patch = new HttpPatch(uri);
if (data != null) {
patch.setEntity(new InputStreamEntity(data));
}
return patch;
default:
throw new ProxyException(ErrorResponse.methodNotAllowed("Will not proxy such calls."));
}
}
private static void copyHeaders(Map<String, List<String>> headers, HttpRequestBase toRequest) {
for (Map.Entry<String, List<String>> headerEntry : headers.entrySet()) {
if (HEADERS_TO_COPY.contains(headerEntry.getKey())) {
for (String value : headerEntry.getValue()) {
toRequest.addHeader(headerEntry.getKey(), value);
}
}
}
}
/**
* During upgrade, one server can be down; this is normal. Therefore we do a quick ping on the first server,
* if it is not responding, we try the other servers first. False positives/negatives are not critical,
* but will increase latency to some extent.
*/
private boolean queueFirstServerIfDown(List<URI> allServers, ProxyRequest proxyRequest) {
if (allServers.size() < 2) { return false; }
URI uri = allServers.get(0);
HttpGet httpget = new HttpGet(uri);
int timeout = 500;
RequestConfig config = RequestConfig.custom()
.setConnectTimeout(timeout)
.setConnectionRequestTimeout(timeout)
.setSocketTimeout(timeout).build();
try (CloseableHttpClient client = createHttpClient(config, sslContextProvider, zoneRegistry, proxyRequest);
CloseableHttpResponse response = client.execute(httpget)) {
if (response.getStatusLine().getStatusCode() == 200) {
return false;
}
} catch (IOException e) {
}
allServers.remove(0);
allServers.add(uri);
return true;
}
private static CloseableHttpClient createHttpClient(RequestConfig config,
AthenzSslContextProvider sslContextProvider,
ZoneRegistry zoneRegistry,
ProxyRequest proxyRequest) {
AthenzIdentityVerifier hostnameVerifier =
new AthenzIdentityVerifier(
singleton(
zoneRegistry.getConfigServerAthenzService(
ZoneId.from(proxyRequest.getEnvironment(), proxyRequest.getRegion()))));
return HttpClientBuilder.create()
.setUserAgent("config-server-proxy-client")
.setSslcontext(sslContextProvider.get())
.setHostnameVerifier(new AthenzIdentityVerifierAdapter(hostnameVerifier))
.setDefaultRequestConfig(config)
.build();
}
private static class AthenzIdentityVerifierAdapter implements X509HostnameVerifier {
private final AthenzIdentityVerifier verifier;
AthenzIdentityVerifierAdapter(AthenzIdentityVerifier verifier) {
this.verifier = verifier;
}
@Override
public boolean verify(String hostname, SSLSession sslSession) {
return verifier.verify(hostname, sslSession);
}
@Override
public void verify(String host, SSLSocket ssl) { /* All sockets accepted */}
@Override
public void verify(String hostname, X509Certificate certificate) throws SSLException {
AthenzIdentity identity = AthenzIdentities.from(certificate);
if (!verifier.isTrusted(identity)) {
throw new SSLException("Athenz identity is not trusted: " + identity.getFullName());
}
}
@Override
public void verify(String hostname, String[] cns, String[] subjectAlts) throws SSLException {
AthenzIdentity identity = AthenzIdentities.from(cns[0]);
if (!verifier.isTrusted(identity)) {
throw new SSLException("Athenz identity is not trusted: " + identity.getFullName());
}
}
}
} |
Optional keys are rarely a good idea. If it's optional, the key shouldn't have been added to the map in the first place. If you need to do something for the empty value, use the "zero" value of the type instead (e.g. `Instant.EPOCH` in this case). | private List<Job> computeReadyJobs(ApplicationId id) {
List<Job> jobs = new ArrayList<>();
applications().get(id).ifPresent(application -> {
List<Step> steps = application.deploymentSpec().steps().isEmpty()
? singletonList(new DeploymentSpec.DeclaredZone(test))
: application.deploymentSpec().steps();
List<Step> productionSteps = steps.stream().filter(step -> step.deploysTo(prod) || step.zones().isEmpty()).collect(toList());
Optional<Instant> completedAt = Optional.of(Instant.EPOCH);
String reason = "New change available";
List<Job> testJobs = null;
for (Step step : productionSteps) {
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(application.change(), application, job)));
if (jobsByCompletion.containsKey(empty())) {
for (JobType job : jobsByCompletion.get(empty())) {
State target = targetFor(application, application.change(), deploymentFor(application, job));
if (isVerified(application, target, job)) {
if (completedAt.isPresent())
jobs.add(deploymentJob(application, target, application.change(), job, reason, completedAt.get(), stepJobs));
}
else if (testJobs == null)
testJobs = testJobsFor(application, target, "Testing deployment for " + job.jobName(), completedAt.orElse(clock.instant()));
}
}
else {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
}
if (testJobs == null)
testJobs = testJobsFor(application, targetFor(application, application.change(), empty()), "Testing last changes outside prod", clock.instant());
jobs.addAll(testJobs);
if (steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob)
.allMatch(job -> completedAt(application.change(), application, job).isPresent()))
applications().lockIfPresent(id, lockedApplication -> applications().store(lockedApplication.withChange(Change.empty())));
});
return jobs;
} | Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(application.change(), application, job))); | private List<Job> computeReadyJobs(ApplicationId id) {
List<Job> jobs = new ArrayList<>();
applications().get(id).ifPresent(application -> {
List<Step> steps = application.deploymentSpec().steps().isEmpty()
? singletonList(new DeploymentSpec.DeclaredZone(test))
: application.deploymentSpec().steps();
List<Step> productionSteps = steps.stream().filter(step -> step.deploysTo(prod) || step.zones().isEmpty()).collect(toList());
Optional<Instant> completedAt = application.deploymentJobs().statusOf(stagingTest)
.flatMap(JobStatus::lastSuccess).map(JobRun::at);
String reason = "New change available";
List<Job> testJobs = null;
for (Step step : productionSteps) {
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(application.change(), application, job)));
if (jobsByCompletion.containsKey(empty())) {
for (JobType job : jobsByCompletion.get(empty())) {
State target = targetFor(application, application.change(), deploymentFor(application, job));
if (isVerified(application, target, job)) {
if (completedAt.isPresent())
jobs.add(deploymentJob(application, target, application.change(), job, reason, completedAt.get(), stepJobs));
}
else if (testJobs == null) {
if ( ! alreadyTriggered(application, target))
testJobs = testJobsFor(application, target, "Testing deployment for " + job.jobName(), completedAt.orElse(clock.instant()));
else
testJobs = emptyList();
}
}
}
else {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
}
if (testJobs == null)
testJobs = testJobsFor(application, targetFor(application, application.change(), empty()), "Testing last changes outside prod", clock.instant());
jobs.addAll(testJobs);
if (steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob)
.allMatch(job -> completedAt(application.change(), application, job).isPresent()))
applications().lockIfPresent(id, lockedApplication -> applications().store(lockedApplication.withChange(Change.empty())));
});
return jobs;
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.order = new DeploymentOrder(controller::system);
this.buildService = buildService;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*/
public void notifyOfCompletion(JobReport report) {
log.log(LogLevel.INFO, String.format("Got notified of %s for %s of %s (%d).",
report.jobError().map(JobError::toString).orElse("success"),
report.jobType(),
report.applicationId(),
report.projectId()));
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(OptionalLong.of(report.projectId()));
if (report.jobType() == component && report.success()) {
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion));
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
applications().store(application);
});
}
/**
* Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
*
* Only one job is triggered each run for test jobs, since their environments have limited capacity.
*/
public long triggerReadyJobs() {
return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
.entrySet().stream()
.flatMap(entry -> (entry.getKey()
? entry.getValue().stream()
.sorted(comparing(Job::isRetry)
.thenComparing(Job::applicationUpgrade)
.reversed()
.thenComparing(Job::availableSince))
.collect(groupingBy(Job::jobType))
: entry.getValue().stream()
.collect(groupingBy(Job::applicationId)))
.values().stream()
.map(jobs -> (Supplier<Long>) jobs.stream()
.filter(job -> canTrigger(job) && trigger(job))
.limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
.parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
* Attempts to trigger the given job for the given application and returns the outcome.
*
* If the build service can not find the given job, or claims it is illegal to trigger it,
* the project id is removed from the application owning the job, to prevent further trigger attempts.
*/
public boolean trigger(Job job) {
log.log(LogLevel.INFO, String.format("Attempting to trigger %s: %s (%s)", job, job.reason, job.target));
try {
buildService.trigger(job);
applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(
job.jobType, new JobRun(-1, job.target.targetPlatform, job.target.targetApplication, job.reason, clock.instant()))));
return true;
}
catch (RuntimeException e) {
log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
applications().lockOrThrow(job.applicationId(), application ->
applications().store(application.withProjectId(OptionalLong.empty())));
return false;
}
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already has an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
});
}
public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
return computeReadyJobs().collect(groupingBy(Job::jobType));
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps. */
private Stream<Job> computeReadyJobs() {
return ApplicationList.from(applications().asList())
.notPullRequest()
.withProjectId()
.deploying()
.idList().stream()
.map(this::computeReadyJobs)
.flatMap(List::stream);
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
public boolean isRunning(Application application, JobType jobType) {
return ! application.deploymentJobs().statusOf(jobType)
.flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))).orElse(false)
&& buildService.isRunning(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
}
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType) {
Application application = applications().require(applicationId);
if (jobType == component) {
buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
return singletonList(component);
}
State target = targetFor(application, application.change(), deploymentFor(application, jobType));
String reason = ">:o:< Triggered by force! (-o-) |-o-| (=oo=)";
if (isVerified(application, target, jobType)) {
trigger(deploymentJob(application, target, application.change(), jobType, reason, clock.instant(), Collections.emptySet()));
return singletonList(jobType);
}
List<Job> testJobs = testJobsFor(application, target, reason, clock.instant());
testJobs.forEach(this::trigger);
return testJobs.stream().map(Job::jobType).collect(toList());
}
private Job deploymentJob(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
.filter(JobError.outOfCapacity::equals).isPresent();
if (isRetry) reason += "; retrying on out of capacity";
return new Job(application, target, jobType, reason, availableSince, concurrentlyWith, isRetry, change.application().isPresent());
}
private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::version), change.platform())
.orElse(application.oldestDeployedPlatform()
.orElse(controller.systemVersion()));
}
private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::applicationVersion), change.application())
.orElse(application.oldestDeployedApplication()
.orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().applicationVersion()));
}
private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
}
/**
* Finds the next step to trigger for the given application, if any, and returns these as a list.
*/
private List<Job> testJobsFor(Application application, State target, String reason, Instant availableSince) {
List<Step> steps = application.deploymentSpec().steps();
if (steps.isEmpty()) steps = singletonList(new DeploymentSpec.DeclaredZone(test));
List<Job> jobs = new ArrayList<>();
for (Step step : steps.stream().filter(step -> step.deploysTo(test) || step.deploysTo(staging)).collect(toList())) {
for (JobType jobType : step.zones().stream().map(order::toJob).collect(toList())) {
Optional<JobRun> completion = successOn(application, jobType, target);
if (completion.isPresent())
availableSince = completion.get().at();
else if (isVerified(application, target, jobType))
jobs.add(deploymentJob(application, target, application.change(), jobType, reason, availableSince, emptySet()));
}
}
return jobs;
}
private boolean isVerified(Application application, State state, JobType jobType) {
if (jobType.environment() == staging)
return successOn(application, systemTest, state).isPresent();
if (jobType.environment() == prod)
return successOn(application, stagingTest, state).isPresent()
|| ! JobList.from(application).production()
.lastTriggered().on(state.targetPlatform)
.lastTriggered().on(state.targetApplication)
.isEmpty();
return true;
}
/**
* Returns the instant when the given change is complete for the given application for the given job.
*
* Any job is complete if the given change is already successful on that job.
* A production job is also considered complete if its current change is strictly dominated by what
* is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
* part is a downgrade, regardless of the status of the job.
*/
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
State target = targetFor(application, change, deploymentFor(application, jobType));
Optional<JobRun> lastSuccess = successOn(application, jobType, target);
if (lastSuccess.isPresent() || ! jobType.isProduction())
return lastSuccess.map(JobRun::at);
return deploymentFor(application, jobType)
.filter(deployment -> ! ( change.upgrades(deployment.version())
|| change.upgrades(deployment.applicationVersion()))
&& ( change.downgrades(deployment.version())
|| change.downgrades(deployment.applicationVersion())))
.map(Deployment::at);
}
private Optional<JobRun> successOn(Application application, JobType jobType, State target) {
return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
.filter(last -> target.targetPlatform.equals(last.version())
&& target.targetApplication.equals(last.applicationVersion()));
}
private boolean canTrigger(Job job) {
Application application = applications().require(job.applicationId());
if (isRunning(application, job.jobType))
return false;
if ( ! job.jobType.isProduction())
return true;
if ( ! job.concurrentlyWith.containsAll(runningProductionJobsFor(application)))
return false;
if ( ! application.changeAt(clock.instant()).isPresent())
return false;
return true;
}
private List<JobType> runningProductionJobsFor(Application application) {
return application.deploymentJobs().jobStatus().keySet().parallelStream()
.filter(job -> job.isProduction())
.filter(job -> isRunning(application, job))
.collect(toList());
}
private ApplicationController applications() {
return controller.applications();
}
private boolean acceptNewApplicationVersion(LockedApplication application) {
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
return ! application.changeAt(clock.instant()).platform().isPresent();
}
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
private State targetFor(Application application, Change change, Optional<Deployment> deployment) {
return new State(targetPlatform(application, change, deployment),
targetApplication(application, change, deployment),
deployment.map(Deployment::version),
deployment.map(Deployment::applicationVersion));
}
private static class Job extends BuildJob {
private final JobType jobType;
private final String reason;
private final Instant availableSince;
private final Collection<JobType> concurrentlyWith;
private final boolean isRetry;
private final boolean isApplicationUpgrade;
private final State target;
private Job(Application application, State target, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry, boolean isApplicationUpgrade) {
super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
this.jobType = jobType;
this.availableSince = availableSince;
this.concurrentlyWith = concurrentlyWith;
this.reason = reason;
this.isRetry = isRetry;
this.isApplicationUpgrade = isApplicationUpgrade;
this.target = target;
}
JobType jobType() { return jobType; }
Instant availableSince() { return availableSince; }
boolean isRetry() { return isRetry; }
boolean applicationUpgrade() { return isApplicationUpgrade; }
}
private static class State {
private final Version targetPlatform;
private final ApplicationVersion targetApplication;
private final Optional<Version> sourcePlatform;
private final Optional<ApplicationVersion> sourceApplication;
public State(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
this.targetPlatform = targetPlatform;
this.targetApplication = targetApplication;
this.sourcePlatform = sourcePlatform;
this.sourceApplication = sourceApplication;
}
@Override
public String toString() {
return String.format("platform %s %s, application %s %s",
targetPlatform,
sourcePlatform.map(v -> "(from " + v + ")").orElse(""),
targetApplication.id(),
sourceApplication.map(v -> "(from " + v.id() + ")").orElse(""));
}
}
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.order = new DeploymentOrder(controller::system);
this.buildService = buildService;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*/
public void notifyOfCompletion(JobReport report) {
log.log(LogLevel.DEBUG, String.format("Got notified of %s for %s of %s (%d).",
report.jobError().map(JobError::toString).orElse("success"),
report.jobType(),
report.applicationId(),
report.projectId()));
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(OptionalLong.of(report.projectId()));
if (report.jobType() == component && report.success()) {
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion));
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
applications().store(application);
});
}
/**
* Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
*
* Only one job is triggered each run for test jobs, since their environments have limited capacity.
*/
public long triggerReadyJobs() {
return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
.entrySet().stream()
.flatMap(entry -> (entry.getKey()
? entry.getValue().stream()
.sorted(comparing(Job::isRetry)
.thenComparing(Job::applicationUpgrade)
.reversed()
.thenComparing(Job::availableSince))
.collect(groupingBy(Job::jobType))
: entry.getValue().stream()
.collect(groupingBy(Job::applicationId)))
.values().stream()
.map(jobs -> (Supplier<Long>) jobs.stream()
.filter(job -> canTrigger(job) && trigger(job))
.limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
.parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
* Attempts to trigger the given job for the given application and returns the outcome.
*
* If the build service can not find the given job, or claims it is illegal to trigger it,
* the project id is removed from the application owning the job, to prevent further trigger attempts.
*/
public boolean trigger(Job job) {
log.log(LogLevel.INFO, String.format("Attempting to trigger %s: %s (%s)", job, job.reason, job.target));
try {
buildService.trigger(job);
applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(
job.jobType, new JobRun(-1, job.target.targetPlatform, job.target.targetApplication, job.reason, clock.instant()))));
return true;
}
catch (RuntimeException e) {
log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
applications().lockOrThrow(job.applicationId(), application ->
applications().store(application.withProjectId(OptionalLong.empty())));
return false;
}
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already has an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
});
}
public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
return computeReadyJobs().collect(groupingBy(Job::jobType));
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps. */
private Stream<Job> computeReadyJobs() {
return ApplicationList.from(applications().asList())
.notPullRequest()
.withProjectId()
.deploying()
.idList().stream()
.map(this::computeReadyJobs)
.flatMap(List::stream);
}
    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    public boolean isRunning(Application application, JobType jobType) {
        // A completion recorded after the last triggering means the job is done; only otherwise ask the build service.
        // NOTE(review): lastTriggered().get() assumes a lastCompleted run implies a lastTriggered run exists — confirm.
        // NOTE(review): projectId().getAsLong() assumes callers only pass applications with a project id — confirm.
        return ! application.deploymentJobs().statusOf(jobType)
                .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))).orElse(false)
               && buildService.isRunning(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
    }
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType) {
Application application = applications().require(applicationId);
if (jobType == component) {
buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
return singletonList(component);
}
State target = targetFor(application, application.change(), deploymentFor(application, jobType));
String reason = ">:o:< Triggered by force! (-o-) |-o-| (=oo=)";
if (isVerified(application, target, jobType)) {
trigger(deploymentJob(application, target, application.change(), jobType, reason, clock.instant(), Collections.emptySet()));
return singletonList(jobType);
}
List<Job> testJobs = testJobsFor(application, target, reason, clock.instant());
testJobs.forEach(this::trigger);
return testJobs.stream().map(Job::jobType).collect(toList());
}
private Job deploymentJob(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
.filter(JobError.outOfCapacity::equals).isPresent();
if (isRetry) reason += "; retrying on out of capacity";
return new Job(application, target, jobType, reason, availableSince, concurrentlyWith, isRetry, change.application().isPresent());
}
    /** Returns the platform version to deploy: the higher of what is deployed and what the change carries,
     *  falling back to the oldest deployed platform, or the system version when nothing is deployed. */
    private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
        return max(deployment.map(Deployment::version), change.platform())
                .orElse(application.oldestDeployedPlatform()
                        .orElse(controller.systemVersion()));
    }
    /** Returns the application version to deploy: the higher of what is deployed and what the change carries,
     *  falling back to the oldest deployed version, or the last successful component build when nothing is deployed. */
    private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
        // NOTE(review): the final fallback assumes a component job status with a success exists (two unchecked get()s) — confirm for all callers.
        return max(deployment.map(Deployment::applicationVersion), change.application())
                .orElse(application.oldestDeployedApplication()
                        .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().applicationVersion()));
    }
private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
}
/**
 * Returns the test jobs needed to verify the given target for the given application, if any, as a list.
*/
    private List<Job> testJobsFor(Application application, State target, String reason, Instant availableSince) {
        List<Step> steps = application.deploymentSpec().steps();
        if (steps.isEmpty()) steps = singletonList(new DeploymentSpec.DeclaredZone(test));  // empty spec implies a lone system test
        List<Job> jobs = new ArrayList<>();
        // Walk only the test and staging steps, in declared order.
        for (Step step : steps.stream().filter(step -> step.deploysTo(test) || step.deploysTo(staging)).collect(toList())) {
            for (JobType jobType : step.zones().stream().map(order::toJob).collect(toList())) {
                Optional<JobRun> completion = successOn(application, jobType, target);
                if (completion.isPresent())
                    availableSince = completion.get().at();  // the next job becomes available when this one completed
                else if (isVerified(application, target, jobType))
                    jobs.add(deploymentJob(application, target, application.change(), jobType, reason, availableSince, emptySet()));
            }
        }
        return jobs;
    }
private boolean isVerified(Application application, State state, JobType jobType) {
if (jobType.environment() == staging)
return successOn(application, systemTest, state).isPresent();
if (jobType.environment() == prod)
return successOn(application, stagingTest, state).isPresent()
|| ! JobList.from(application).production()
.lastTriggered().on(state.targetPlatform)
.lastTriggered().on(state.targetApplication)
.isEmpty();
return true;
}
private Optional<Instant> testedAt(Application application, State target) {
return max(successOn(application, systemTest, target).map(JobRun::at),
successOn(application, stagingTest, target).map(JobRun::at));
}
private boolean alreadyTriggered(Application application, State target) {
return ! JobList.from(application).production()
.lastTriggered().on(target.targetPlatform)
.lastTriggered().on(target.targetApplication)
.isEmpty();
}
/**
* Returns the instant when the given change is complete for the given application for the given job.
*
* Any job is complete if the given change is already successful on that job.
* A production job is also considered complete if its current change is strictly dominated by what
* is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
* part is a downgrade, regardless of the status of the job.
*/
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
State target = targetFor(application, change, deploymentFor(application, jobType));
Optional<JobRun> lastSuccess = successOn(application, jobType, target);
if (lastSuccess.isPresent() || ! jobType.isProduction())
return lastSuccess.map(JobRun::at);
return deploymentFor(application, jobType)
.filter(deployment -> ! ( change.upgrades(deployment.version())
|| change.upgrades(deployment.applicationVersion()))
&& ( change.downgrades(deployment.version())
|| change.downgrades(deployment.applicationVersion())))
.map(Deployment::at);
}
private Optional<JobRun> successOn(Application application, JobType jobType, State target) {
return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
.filter(last -> target.targetPlatform.equals(last.version())
&& target.targetApplication.equals(last.applicationVersion()));
}
private boolean canTrigger(Job job) {
Application application = applications().require(job.applicationId());
if (isRunning(application, job.jobType))
return false;
if ( ! job.jobType.isProduction())
return true;
if ( ! job.concurrentlyWith.containsAll(runningProductionJobsFor(application)))
return false;
if ( ! application.changeAt(clock.instant()).isPresent())
return false;
return true;
}
private List<JobType> runningProductionJobsFor(Application application) {
return application.deploymentJobs().jobStatus().keySet().parallelStream()
.filter(job -> job.isProduction())
.filter(job -> isRunning(application, job))
.collect(toList());
}
    /** Convenience accessor for the controller's application repository. */
    private ApplicationController applications() {
        return controller.applications();
    }
private boolean acceptNewApplicationVersion(LockedApplication application) {
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
return ! application.changeAt(clock.instant()).platform().isPresent();
}
    /** Returns the current deployment, if any, in the zone the given job type maps to in this system. */
    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        // NOTE(review): zone(...).get() assumes the job type always has a zone in this system — confirm callers never pass types without one.
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
    }
private State targetFor(Application application, Change change, Optional<Deployment> deployment) {
return new State(targetPlatform(application, change, deployment),
targetApplication(application, change, deployment),
deployment.map(Deployment::version),
deployment.map(Deployment::applicationVersion));
}
    /** A build job enriched with the triggering metadata this class needs to order and gate jobs. */
    private static class Job extends BuildJob {
        private final JobType jobType;
        private final String reason;                       // human-readable cause, stored with the job run
        private final Instant availableSince;              // when the change became ready for this job
        private final Collection<JobType> concurrentlyWith; // production jobs this one may run alongside
        private final boolean isRetry;                     // true when re-running after an out-of-capacity failure
        private final boolean isApplicationUpgrade;        // true when the change carries an application version
        private final State target;                        // target and source versions for the deployment
        private Job(Application application, State target, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.availableSince = availableSince;
            this.concurrentlyWith = concurrentlyWith;
            this.reason = reason;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
            this.target = target;
        }
        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }
    }
    /** The platform and application versions to deploy, together with the currently deployed source versions, if any. */
    private static class State {
        private final Version targetPlatform;
        private final ApplicationVersion targetApplication;
        private final Optional<Version> sourcePlatform;              // empty when there is no current deployment
        private final Optional<ApplicationVersion> sourceApplication; // empty when there is no current deployment
        public State(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
            this.targetPlatform = targetPlatform;
            this.targetApplication = targetApplication;
            this.sourcePlatform = sourcePlatform;
            this.sourceApplication = sourceApplication;
        }
        @Override
        public String toString() {
            return String.format("platform %s %s, application %s %s",
                                 targetPlatform,
                                 sourcePlatform.map(v -> "(from " + v + ")").orElse(""),
                                 targetApplication.id(),
                                 sourceApplication.map(v -> "(from " + v.id() + ")").orElse(""));
        }
    }
} |
Rarely, but in this case I think it's OK: the map is used only as a local variable, so it doesn't leak out anywhere; and the key is exactly an Optional<Instant> -- it represents both whether its value is complete, and if it is, when. I prefer this over generating the values several times, since I prefer to compute things _once_, and reuse the results. (Efficiency is one consideration, but perhaps more important is that you do things right both or none of the times.) | private List<Job> computeReadyJobs(ApplicationId id) {
        List<Job> jobs = new ArrayList<>();
        applications().get(id).ifPresent(application -> {
            // An empty deployment spec implicitly means a single system test.
            List<Step> steps = application.deploymentSpec().steps().isEmpty()
                    ? singletonList(new DeploymentSpec.DeclaredZone(test))
                    : application.deploymentSpec().steps();
            // Production zones and delays (delays have no zones).
            List<Step> productionSteps = steps.stream().filter(step -> step.deploysTo(prod) || step.zones().isEmpty()).collect(toList());
            Optional<Instant> completedAt = Optional.of(Instant.EPOCH);
            String reason = "New change available";
            List<Job> testJobs = null; // null means "not yet computed", so tests are computed at most once
            for (Step step : productionSteps) {
                Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
                // Key empty() collects the jobs where the change is incomplete; other keys hold completion times.
                Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(application.change(), application, job)));
                if (jobsByCompletion.containsKey(empty())) {
                    // Some jobs of this step are incomplete: trigger the verified ones, or request missing tests.
                    for (JobType job : jobsByCompletion.get(empty())) {
                        State target = targetFor(application, application.change(), deploymentFor(application, job));
                        if (isVerified(application, target, job)) {
                            if (completedAt.isPresent())
                                jobs.add(deploymentJob(application, target, application.change(), job, reason, completedAt.get(), stepJobs));
                        }
                        else if (testJobs == null)
                            testJobs = testJobsFor(application, target, "Testing deployment for " + job.jobName(), completedAt.orElse(clock.instant()));
                    }
                }
                else {
                    // The whole step is complete: advance completedAt past it.
                    if (stepJobs.isEmpty()) {
                        Duration delay = ((DeploymentSpec.Delay) step).duration();
                        completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                        reason += " after a delay of " + delay;
                    }
                    else {
                        completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
                        reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                    }
                }
            }
            if (testJobs == null)
                testJobs = testJobsFor(application, targetFor(application, application.change(), empty()), "Testing last changes outside prod", clock.instant());
            jobs.addAll(testJobs);
            // When the change is complete in every declared zone, clear it from the application.
            if (steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob)
                    .allMatch(job -> completedAt(application.change(), application, job).isPresent()))
                applications().lockIfPresent(id, lockedApplication -> applications().store(lockedApplication.withChange(Change.empty())));
        });
        return jobs;
} | Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(application.change(), application, job))); | private List<Job> computeReadyJobs(ApplicationId id) {
        List<Job> jobs = new ArrayList<>();
        applications().get(id).ifPresent(application -> {
            // An empty deployment spec implicitly means a single system test.
            List<Step> steps = application.deploymentSpec().steps().isEmpty()
                    ? singletonList(new DeploymentSpec.DeclaredZone(test))
                    : application.deploymentSpec().steps();
            // Production zones and delays (delays have no zones).
            List<Step> productionSteps = steps.stream().filter(step -> step.deploysTo(prod) || step.zones().isEmpty()).collect(toList());
            // The change becomes available to production when staging last succeeded.
            Optional<Instant> completedAt = application.deploymentJobs().statusOf(stagingTest)
                                                       .flatMap(JobStatus::lastSuccess).map(JobRun::at);
            String reason = "New change available";
            List<Job> testJobs = null; // null means "not yet computed", so tests are computed at most once
            for (Step step : productionSteps) {
                Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
                // Key empty() collects the jobs where the change is incomplete; other keys hold completion times.
                Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(application.change(), application, job)));
                if (jobsByCompletion.containsKey(empty())) {
                    // Some jobs of this step are incomplete: trigger the verified ones, or request missing tests.
                    for (JobType job : jobsByCompletion.get(empty())) {
                        State target = targetFor(application, application.change(), deploymentFor(application, job));
                        if (isVerified(application, target, job)) {
                            if (completedAt.isPresent())
                                jobs.add(deploymentJob(application, target, application.change(), job, reason, completedAt.get(), stepJobs));
                        }
                        else if (testJobs == null) {
                            // Don't re-test a target some production job was already triggered with.
                            if ( ! alreadyTriggered(application, target))
                                testJobs = testJobsFor(application, target, "Testing deployment for " + job.jobName(), completedAt.orElse(clock.instant()));
                            else
                                testJobs = emptyList();
                        }
                    }
                }
                else {
                    // The whole step is complete: advance completedAt past it.
                    if (stepJobs.isEmpty()) {
                        Duration delay = ((DeploymentSpec.Delay) step).duration();
                        completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                        reason += " after a delay of " + delay;
                    }
                    else {
                        completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
                        reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                    }
                }
            }
            if (testJobs == null)
                testJobs = testJobsFor(application, targetFor(application, application.change(), empty()), "Testing last changes outside prod", clock.instant());
            jobs.addAll(testJobs);
            // When the change is complete in every declared zone, clear it from the application.
            if (steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob)
                    .allMatch(job -> completedAt(application.change(), application, job).isPresent()))
                applications().lockIfPresent(id, lockedApplication -> applications().store(lockedApplication.withChange(Change.empty())));
        });
        return jobs;
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.order = new DeploymentOrder(controller::system);
this.buildService = buildService;
}
    /** Returns the deployment-order helper used to map declared zones to job types. */
    public DeploymentOrder deploymentOrder() {
        return order;
    }
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*/
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Got notified of %s for %s of %s (%d).",
                                             report.jobError().map(JobError::toString).orElse("success"),
                                             report.jobType(),
                                             report.applicationId(),
                                             report.projectId()));
        // Reports for applications we don't know are logged and dropped.
        if ( ! applications().get(report.applicationId()).isPresent()) {
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        applications().lockOrThrow(report.applicationId(), application -> {
            ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
                                                          .orElse(ApplicationVersion.unknown);
            application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
            application = application.withProjectId(OptionalLong.of(report.projectId()));
            // A successful component build either becomes the current change, or is queued as outstanding.
            if (report.jobType() == component && report.success()) {
                if (acceptNewApplicationVersion(application))
                    application = application.withChange(application.change().with(applicationVersion));
                else
                    application = application.withOutstandingChange(Change.of(applicationVersion));
            }
            applications().store(application);
        });
    }
/**
* Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
*
* Only one job is triggered each run for test jobs, since their environments have limited capacity.
*/
    public long triggerReadyJobs() {
        // Partition ready jobs into test jobs (key true) and the rest (key false).
        return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
                .entrySet().stream()
                .flatMap(entry -> (entry.getKey()
                        // Test jobs: group by job type, most urgent first (retries, then application upgrades, then oldest).
                        ? entry.getValue().stream()
                                .sorted(comparing(Job::isRetry)
                                        .thenComparing(Job::applicationUpgrade)
                                        .reversed()
                                        .thenComparing(Job::availableSince))
                                .collect(groupingBy(Job::jobType))
                        // Other jobs: group by application.
                        : entry.getValue().stream()
                                .collect(groupingBy(Job::applicationId)))
                        .values().stream()
                        // Each group becomes a deferred Supplier; only one test job per type is triggered
                        // (limit(1)), since test environments have limited capacity.
                        .map(jobs -> (Supplier<Long>) jobs.stream()
                                .filter(job -> canTrigger(job) && trigger(job))
                                .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                // The suppliers run in parallel; their counts sum to the number of triggered jobs.
                .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }
/**
* Attempts to trigger the given job for the given application and returns the outcome.
*
* If the build service can not find the given job, or claims it is illegal to trigger it,
* the project id is removed from the application owning the job, to prevent further trigger attemps.
*/
public boolean trigger(Job job) {
log.log(LogLevel.INFO, String.format("Attempting to trigger %s: %s (%s)", job, job.reason, job.target));
try {
buildService.trigger(job);
applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(
job.jobType, new JobRun(-1, job.target.targetPlatform, job.target.targetApplication, job.reason, clock.instant()))));
return true;
}
catch (RuntimeException e) {
log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
applications().lockOrThrow(job.applicationId(), application ->
applications().store(application.withProjectId(OptionalLong.empty())));
return false;
}
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already has an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
});
}
public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
return computeReadyJobs().collect(groupingBy(Job::jobType));
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps. */
private Stream<Job> computeReadyJobs() {
return ApplicationList.from(applications().asList())
.notPullRequest()
.withProjectId()
.deploying()
.idList().stream()
.map(this::computeReadyJobs)
.flatMap(List::stream);
}
    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
public boolean isRunning(Application application, JobType jobType) {
return ! application.deploymentJobs().statusOf(jobType)
.flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))).orElse(false)
&& buildService.isRunning(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
}
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType) {
Application application = applications().require(applicationId);
if (jobType == component) {
buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
return singletonList(component);
}
State target = targetFor(application, application.change(), deploymentFor(application, jobType));
String reason = ">:o:< Triggered by force! (-o-) |-o-| (=oo=)";
if (isVerified(application, target, jobType)) {
trigger(deploymentJob(application, target, application.change(), jobType, reason, clock.instant(), Collections.emptySet()));
return singletonList(jobType);
}
List<Job> testJobs = testJobsFor(application, target, reason, clock.instant());
testJobs.forEach(this::trigger);
return testJobs.stream().map(Job::jobType).collect(toList());
}
private Job deploymentJob(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
.filter(JobError.outOfCapacity::equals).isPresent();
if (isRetry) reason += "; retrying on out of capacity";
return new Job(application, target, jobType, reason, availableSince, concurrentlyWith, isRetry, change.application().isPresent());
}
private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::version), change.platform())
.orElse(application.oldestDeployedPlatform()
.orElse(controller.systemVersion()));
}
private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::applicationVersion), change.application())
.orElse(application.oldestDeployedApplication()
.orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().applicationVersion()));
}
private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
}
/**
* Finds the next step to trigger for the given application, if any, and returns these as a list.
*/
private List<Job> testJobsFor(Application application, State target, String reason, Instant availableSince) {
List<Step> steps = application.deploymentSpec().steps();
if (steps.isEmpty()) steps = singletonList(new DeploymentSpec.DeclaredZone(test));
List<Job> jobs = new ArrayList<>();
for (Step step : steps.stream().filter(step -> step.deploysTo(test) || step.deploysTo(staging)).collect(toList())) {
for (JobType jobType : step.zones().stream().map(order::toJob).collect(toList())) {
Optional<JobRun> completion = successOn(application, jobType, target);
if (completion.isPresent())
availableSince = completion.get().at();
else if (isVerified(application, target, jobType))
jobs.add(deploymentJob(application, target, application.change(), jobType, reason, availableSince, emptySet()));
}
}
return jobs;
}
private boolean isVerified(Application application, State state, JobType jobType) {
if (jobType.environment() == staging)
return successOn(application, systemTest, state).isPresent();
if (jobType.environment() == prod)
return successOn(application, stagingTest, state).isPresent()
|| ! JobList.from(application).production()
.lastTriggered().on(state.targetPlatform)
.lastTriggered().on(state.targetApplication)
.isEmpty();
return true;
}
/**
* Returns the instant when the given change is complete for the given application for the given job.
*
* Any job is complete if the given change is already successful on that job.
* A production job is also considered complete if its current change is strictly dominated by what
* is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
* part is a downgrade, regardless of the status of the job.
*/
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
State target = targetFor(application, change, deploymentFor(application, jobType));
Optional<JobRun> lastSuccess = successOn(application, jobType, target);
if (lastSuccess.isPresent() || ! jobType.isProduction())
return lastSuccess.map(JobRun::at);
return deploymentFor(application, jobType)
.filter(deployment -> ! ( change.upgrades(deployment.version())
|| change.upgrades(deployment.applicationVersion()))
&& ( change.downgrades(deployment.version())
|| change.downgrades(deployment.applicationVersion())))
.map(Deployment::at);
}
private Optional<JobRun> successOn(Application application, JobType jobType, State target) {
return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
.filter(last -> target.targetPlatform.equals(last.version())
&& target.targetApplication.equals(last.applicationVersion()));
}
private boolean canTrigger(Job job) {
Application application = applications().require(job.applicationId());
if (isRunning(application, job.jobType))
return false;
if ( ! job.jobType.isProduction())
return true;
if ( ! job.concurrentlyWith.containsAll(runningProductionJobsFor(application)))
return false;
if ( ! application.changeAt(clock.instant()).isPresent())
return false;
return true;
}
private List<JobType> runningProductionJobsFor(Application application) {
return application.deploymentJobs().jobStatus().keySet().parallelStream()
.filter(job -> job.isProduction())
.filter(job -> isRunning(application, job))
.collect(toList());
}
private ApplicationController applications() {
return controller.applications();
}
private boolean acceptNewApplicationVersion(LockedApplication application) {
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
return ! application.changeAt(clock.instant()).platform().isPresent();
}
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
private State targetFor(Application application, Change change, Optional<Deployment> deployment) {
return new State(targetPlatform(application, change, deployment),
targetApplication(application, change, deployment),
deployment.map(Deployment::version),
deployment.map(Deployment::applicationVersion));
}
private static class Job extends BuildJob {
private final JobType jobType;
private final String reason;
private final Instant availableSince;
private final Collection<JobType> concurrentlyWith;
private final boolean isRetry;
private final boolean isApplicationUpgrade;
private final State target;
private Job(Application application, State target, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry, boolean isApplicationUpgrade) {
super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
this.jobType = jobType;
this.availableSince = availableSince;
this.concurrentlyWith = concurrentlyWith;
this.reason = reason;
this.isRetry = isRetry;
this.isApplicationUpgrade = isApplicationUpgrade;
this.target = target;
}
JobType jobType() { return jobType; }
Instant availableSince() { return availableSince; }
boolean isRetry() { return isRetry; }
boolean applicationUpgrade() { return isApplicationUpgrade; }
}
private static class State {
private final Version targetPlatform;
private final ApplicationVersion targetApplication;
private final Optional<Version> sourcePlatform;
private final Optional<ApplicationVersion> sourceApplication;
public State(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
this.targetPlatform = targetPlatform;
this.targetApplication = targetApplication;
this.sourcePlatform = sourcePlatform;
this.sourceApplication = sourceApplication;
}
@Override
public String toString() {
return String.format("platform %s %s, application %s %s",
targetPlatform,
sourcePlatform.map(v -> "(from " + v + ")").orElse(""),
targetApplication.id(),
sourceApplication.map(v -> "(from " + v.id() + ")").orElse(""));
}
}
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.order = new DeploymentOrder(controller::system);
this.buildService = buildService;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*/
public void notifyOfCompletion(JobReport report) {
log.log(LogLevel.DEBUG, String.format("Got notified of %s for %s of %s (%d).",
report.jobError().map(JobError::toString).orElse("success"),
report.jobType(),
report.applicationId(),
report.projectId()));
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(OptionalLong.of(report.projectId()));
if (report.jobType() == component && report.success()) {
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion));
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
applications().store(application);
});
}
/**
 * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
 *
 * Only one job is triggered each run for test jobs, since their environments have limited capacity.
 */
public long triggerReadyJobs() {
    // Partition the ready jobs into test jobs (key true) and all other jobs (key false).
    return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
            .entrySet().stream()
            .flatMap(entry -> (entry.getKey()
                    // Test jobs: ordered so retries and application upgrades come first,
                    // then by how long the job has been ready, grouped per job type.
                    ? entry.getValue().stream()
                            .sorted(comparing(Job::isRetry)
                                            .thenComparing(Job::applicationUpgrade)
                                            .reversed()
                                            .thenComparing(Job::availableSince))
                            .collect(groupingBy(Job::jobType))
                    // Other jobs: grouped per application.
                    : entry.getValue().stream()
                            .collect(groupingBy(Job::applicationId)))
                    .values().stream()
                    // Each group becomes a deferred computation which triggers what it can;
                    // test-job groups trigger at most one job, to respect capacity limits.
                    .map(jobs -> (Supplier<Long>) jobs.stream()
                            .filter(job -> canTrigger(job) && trigger(job))
                            .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
            .parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
 * Attempts to trigger the given job for the given application and returns the outcome.
 *
 * If the build service can not find the given job, or claims it is illegal to trigger it,
 * the project id is removed from the application owning the job, to prevent further trigger attempts.
 *
 * @param job the job to trigger in the build service
 * @return true if the job was successfully triggered
 */
public boolean trigger(Job job) {
    log.log(LogLevel.INFO, String.format("Attempting to trigger %s: %s (%s)", job, job.reason, job.target));
    try {
        buildService.trigger(job);
        // Record the triggering on the application, so isRunning() and status reporting see it.
        applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(
                job.jobType, new JobRun(-1, job.target.targetPlatform, job.target.targetApplication, job.reason, clock.instant()))));
        return true;
    }
    catch (RuntimeException e) {
        // Fix: pass the exception as the log record's throwable instead of concatenating it,
        // so the full stack trace is preserved in the log.
        log.log(LogLevel.WARNING, "Exception triggering " + job, e);
        // Unknown or untriggerable job: detach the project id so we stop retrying forever.
        if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
            applications().lockOrThrow(job.applicationId(), application ->
                    applications().store(application.withProjectId(OptionalLong.empty())));
        return false;
    }
}
/**
 * Triggers a change of this application
 *
 * @param applicationId the application to trigger
 * @param change the change to start rolling out
 * @throws IllegalArgumentException if this application already has an ongoing change
 */
public void triggerChange(ApplicationId applicationId, Change change) {
    applications().lockOrThrow(applicationId, application -> {
        // An in-flight change blocks a new one, unless the current one is failing anyway.
        if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
            throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                               application.change() + " is already in progress");
        application = application.withChange(change);
        // Starting an application change consumes any outstanding (parked) one.
        if (change.application().isPresent())
            application = application.withOutstandingChange(Change.empty());
        applications().store(application);
    });
}
/**
 * Cancels the platform part of the given application's ongoing change, and cancels the
 * application part as well unless {@code keepApplicationChange}.
 */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
    applications().lockOrThrow(applicationId, application -> {
        // Keep only the application part of the current change when so requested; otherwise clear it all.
        Change remaining = keepApplicationChange
                ? application.change().application().map(Change::of).orElse(Change.empty())
                : Change.empty();
        applications().store(application.withChange(remaining));
    });
}
/** Returns the jobs which are currently ready to run, grouped by their job type. */
public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
    return computeReadyJobs().collect(groupingBy(Job::jobType));
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps. */
private Stream<Job> computeReadyJobs() {
    // Only applications with a project id and an ongoing change are candidates;
    // pull request instances never have deployment jobs triggered.
    ApplicationList candidates = ApplicationList.from(applications().asList())
            .notPullRequest()
            .withProjectId()
            .deploying();
    return candidates.idList().stream()
            .flatMap(id -> computeReadyJobs(id).stream());
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
public boolean isRunning(Application application, JobType jobType) {
    // First clause: the job is not running if it completed after its last triggering.
    // NOTE(review): lastTriggered().get() assumes every completion has a prior triggering recorded — confirm this invariant.
    return ! application.deploymentJobs().statusOf(jobType)
            .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))).orElse(false)
            && buildService.isRunning(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
}
/**
 * Forcibly triggers the given job type for the given application, bypassing the normal readiness
 * computation, and returns the job types actually triggered.
 *
 * If the requested job is not yet verified for its target (prerequisite tests have not passed),
 * the missing test jobs are triggered instead of the requested job.
 */
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType) {
    Application application = applications().require(applicationId);
    // The component (build) job is triggered directly in the build service, with no target state.
    if (jobType == component) {
        buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
        return singletonList(component);
    }
    State target = targetFor(application, application.change(), deploymentFor(application, jobType));
    String reason = ">:o:< Triggered by force! (-o-) |-o-| (=oo=)";
    if (isVerified(application, target, jobType)) {
        trigger(deploymentJob(application, target, application.change(), jobType, reason, clock.instant(), Collections.emptySet()));
        return singletonList(jobType);
    }
    // Not verified for this target: trigger the prerequisite test jobs instead.
    List<Job> testJobs = testJobsFor(application, target, reason, clock.instant());
    testJobs.forEach(this::trigger);
    return testJobs.stream().map(Job::jobType).collect(toList());
}
/** Creates a deployment job for the given job type, marking it as a retry when its last error was out of capacity. */
private Job deploymentJob(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
    boolean outOfCapacityRetry = application.deploymentJobs().statusOf(jobType)
            .flatMap(JobStatus::jobError)
            .filter(JobError.outOfCapacity::equals)
            .isPresent();
    String jobReason = outOfCapacityRetry ? reason + "; retrying on out of capacity" : reason;
    return new Job(application, target, jobType, jobReason, availableSince, concurrentlyWith, outOfCapacityRetry, change.application().isPresent());
}
/**
 * Returns the platform version to deploy: the newest of what is already deployed in the zone and
 * what the change specifies, falling back to the application's oldest deployed platform, and
 * finally to the current system version.
 */
private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
    return max(deployment.map(Deployment::version), change.platform())
            .orElse(application.oldestDeployedPlatform()
                            .orElse(controller.systemVersion()));
}
/**
 * Returns the application version to deploy: the newest of what is already deployed in the zone
 * and what the change specifies, falling back to the oldest deployed version, and finally to the
 * last successfully built version.
 * NOTE(review): the final fallback calls .get() on the component job's last success — this assumes
 * a successful build exists whenever the other fallbacks are empty; confirm callers guarantee it.
 */
private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
    return max(deployment.map(Deployment::applicationVersion), change.application())
            .orElse(application.oldestDeployedApplication()
                            .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().applicationVersion()));
}
/** Returns the greater of the two given values, or whichever is present, or empty if neither is. */
private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
    if ( ! o1.isPresent()) return o2;
    if ( ! o2.isPresent()) return o1;
    return o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
}
/**
 * Finds the test jobs (system and staging test) which are not yet complete for the given target,
 * and returns these as a list.
 */
private List<Job> testJobsFor(Application application, State target, String reason, Instant availableSince) {
    List<Step> steps = application.deploymentSpec().steps();
    // An empty deployment spec implicitly means a single system test step.
    if (steps.isEmpty()) steps = singletonList(new DeploymentSpec.DeclaredZone(test));
    List<Job> jobs = new ArrayList<>();
    for (Step step : steps.stream().filter(step -> step.deploysTo(test) || step.deploysTo(staging)).collect(toList())) {
        for (JobType jobType : step.zones().stream().map(order::toJob).collect(toList())) {
            Optional<JobRun> completion = successOn(application, jobType, target);
            if (completion.isPresent())
                // Already succeeded on this target: later jobs become available from that time.
                availableSince = completion.get().at();
            else if (isVerified(application, target, jobType))
                jobs.add(deploymentJob(application, target, application.change(), jobType, reason, availableSince, emptySet()));
        }
    }
    return jobs;
}
/**
 * Returns whether the prerequisites for running the given job on the given target state are met:
 * staging requires a matching system test success; production requires a matching staging test
 * success, or that some production job was already triggered on this target.
 */
private boolean isVerified(Application application, State state, JobType jobType) {
    if (jobType.environment() == staging)
        return successOn(application, systemTest, state).isPresent();
    if (jobType.environment() == prod)
        return successOn(application, stagingTest, state).isPresent()
               || ! JobList.from(application).production()
                           .lastTriggered().on(state.targetPlatform)
                           .lastTriggered().on(state.targetApplication)
                           .isEmpty();
    return true; // Test jobs themselves have no prerequisites.
}
/** Returns the most recent instant at which system or staging test succeeded on the given target, if either did. */
private Optional<Instant> testedAt(Application application, State target) {
    Optional<Instant> systemTested = successOn(application, systemTest, target).map(JobRun::at);
    Optional<Instant> stagingTested = successOn(application, stagingTest, target).map(JobRun::at);
    return max(systemTested, stagingTested);
}
/** Returns whether a production job of this application has already been triggered on the given target. */
private boolean alreadyTriggered(Application application, State target) {
    return ! JobList.from(application).production()
                    .lastTriggered().on(target.targetPlatform)
                    .lastTriggered().on(target.targetApplication)
                    .isEmpty();
}
/**
 * Returns the instant when the given change is complete for the given application for the given job.
 *
 * Any job is complete if the given change is already successful on that job.
 * A production job is also considered complete if its current change is strictly dominated by what
 * is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
 * part is a downgrade, regardless of the status of the job.
 */
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
    State target = targetFor(application, change, deploymentFor(application, jobType));
    Optional<JobRun> lastSuccess = successOn(application, jobType, target);
    // A success on this target, or any non-production job without one, decides directly.
    if (lastSuccess.isPresent() || ! jobType.isProduction())
        return lastSuccess.map(JobRun::at);
    // Production only: complete when the deployed versions strictly dominate the change
    // (nothing would be upgraded, and at least one part would be downgraded).
    return deploymentFor(application, jobType)
            .filter(deployment -> ! ( change.upgrades(deployment.version())
                                      || change.upgrades(deployment.applicationVersion()))
                                  && ( change.downgrades(deployment.version())
                                       || change.downgrades(deployment.applicationVersion())))
            .map(Deployment::at);
}
/** Returns the last success of the given job for the given application, if that success was on the given target state. */
private Optional<JobRun> successOn(Application application, JobType jobType, State target) {
    return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
            .filter(last -> target.targetPlatform.equals(last.version())
                            && target.targetApplication.equals(last.applicationVersion()));
}
/**
 * Returns whether the given job may be triggered now: it must not already be running, and
 * production jobs additionally require compatible concurrency and an open change window.
 */
private boolean canTrigger(Job job) {
    Application application = applications().require(job.applicationId());
    if (isRunning(application, job.jobType))
        return false;
    if ( ! job.jobType.isProduction())
        return true;
    // Production jobs: every currently running production job must be one this job may run
    // concurrently with, and the change must be allowed to roll out at this instant.
    return job.concurrentlyWith.containsAll(runningProductionJobsFor(application))
           && application.changeAt(clock.instant()).isPresent();
}
/** Returns the production jobs of the given application which are currently running. */
private List<JobType> runningProductionJobsFor(Application application) {
    // Parallel because each isRunning call may consult the build service.
    return application.deploymentJobs().jobStatus().keySet().parallelStream()
            .filter(JobType::isProduction)
            .filter(jobType -> isRunning(application, jobType))
            .collect(toList());
}
/** Returns the application controller, which owns application state and its locking. */
private ApplicationController applications() {
    return controller.applications();
}
/**
 * Returns whether a newly built application version should start rolling out immediately,
 * rather than being parked as the outstanding change.
 */
private boolean acceptNewApplicationVersion(LockedApplication application) {
    // Accept when an application change is already rolling out, or the current change is failing anyway.
    if (application.change().application().isPresent() || application.deploymentJobs().hasFailures())
        return true;
    // Otherwise, accept only when no platform upgrade is currently rolling out.
    return ! application.changeAt(clock.instant()).platform().isPresent();
}
/**
 * Returns this application's deployment in the zone of the given job type, if any.
 * NOTE(review): jobType.zone(...).get() assumes the job type maps to a zone in this system —
 * confirm callers never pass zone-less job types (such as component) here.
 */
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
    return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
/** Returns the deployment state for applying the given change on top of the given deployment: target versions plus the sources they start from. */
private State targetFor(Application application, Change change, Optional<Deployment> deployment) {
    return new State(targetPlatform(application, change, deployment),
                     targetApplication(application, change, deployment),
                     deployment.map(Deployment::version),
                     deployment.map(Deployment::applicationVersion));
}
/** A triggerable deployment job, with the information needed to decide when and whether to trigger it. */
private static class Job extends BuildJob {
    private final JobType jobType;
    private final String reason;                        // Human-readable reason for triggering this job.
    private final Instant availableSince;               // When the change first became available to this job.
    private final Collection<JobType> concurrentlyWith; // Jobs this one is allowed to run concurrently with.
    private final boolean isRetry;                      // Whether this is a retry after an out-of-capacity failure.
    private final boolean isApplicationUpgrade;         // Whether the change includes an application upgrade.
    private final State target;                         // The versions this job should deploy, and their sources.
    private Job(Application application, State target, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry, boolean isApplicationUpgrade) {
        super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
        this.jobType = jobType;
        this.availableSince = availableSince;
        this.concurrentlyWith = concurrentlyWith;
        this.reason = reason;
        this.isRetry = isRetry;
        this.isApplicationUpgrade = isApplicationUpgrade;
        this.target = target;
    }
    JobType jobType() { return jobType; }
    Instant availableSince() { return availableSince; }
    boolean isRetry() { return isRetry; }
    boolean applicationUpgrade() { return isApplicationUpgrade; }
}
/** The platform and application versions a job should converge on (targets), and those it starts from (sources). */
private static class State {
    private final Version targetPlatform;
    private final ApplicationVersion targetApplication;
    private final Optional<Version> sourcePlatform;               // Empty when there is no current deployment.
    private final Optional<ApplicationVersion> sourceApplication; // Empty when there is no current deployment.
    public State(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
        this.targetPlatform = targetPlatform;
        this.targetApplication = targetApplication;
        this.sourcePlatform = sourcePlatform;
        this.sourceApplication = sourceApplication;
    }
    @Override
    public String toString() {
        return String.format("platform %s %s, application %s %s",
                             targetPlatform,
                             sourcePlatform.map(v -> "(from " + v + ")").orElse(""),
                             targetApplication.id(),
                             sourceApplication.map(v -> "(from " + v.id() + ")").orElse(""));
    }
}
} |
Delay will now be from staging for first production job, and from previous production jobs for later production jobs. I'll see if I can make it count down from the latter of the two, for later production jobs. | private List<Job> computeReadyJobs(ApplicationId id) {
List<Job> jobs = new ArrayList<>();
applications().get(id).ifPresent(application -> {
    // An empty deployment spec implicitly means a single system test step.
    List<Step> steps = application.deploymentSpec().steps().isEmpty()
            ? singletonList(new DeploymentSpec.DeclaredZone(test))
            : application.deploymentSpec().steps();
    // Walk only production (and zone-less delay) steps here; test steps are handled by testJobsFor.
    List<Step> productionSteps = steps.stream().filter(step -> step.deploysTo(prod) || step.zones().isEmpty()).collect(toList());
    // The change first becomes available for production when staging test last succeeded.
    Optional<Instant> completedAt = application.deploymentJobs().statusOf(stagingTest)
            .flatMap(JobStatus::lastSuccess).map(JobRun::at);
    String reason = "New change available";
    List<Job> testJobs = null; // Set at most once: the tests needed before the first unverified production job.
    for (Step step : productionSteps) {
        Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
        // Group this step's jobs by when (if at all) they completed the current change.
        Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(application.change(), application, job)));
        if (jobsByCompletion.containsKey(empty())) {
            // Some jobs of this step are incomplete: trigger the verified ones when the previous
            // step is done, or request the test jobs needed to verify them.
            for (JobType job : jobsByCompletion.get(empty())) {
                State target = targetFor(application, application.change(), deploymentFor(application, job));
                if (isVerified(application, target, job)) {
                    if (completedAt.isPresent())
                        jobs.add(deploymentJob(application, target, application.change(), job, reason, completedAt.get(), stepJobs));
                }
                else if (testJobs == null)
                    testJobs = testJobsFor(application, target, "Testing deployment for " + job.jobName(), completedAt.orElse(clock.instant()));
            }
        }
        else {
            // All jobs of this step have completed, so the change may proceed to the next step.
            if (stepJobs.isEmpty()) {
                // A delay step: completion is postponed by the delay, and cleared if it has not yet passed.
                Duration delay = ((DeploymentSpec.Delay) step).duration();
                completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                reason += " after a delay of " + delay;
            }
            else {
                // The next step becomes available when the last job of this step completed.
                completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
                reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
            }
        }
    }
    // If no production job needed verification, still run tests for the latest change outside prod.
    if (testJobs == null)
        testJobs = testJobsFor(application, targetFor(application, application.change(), empty()), "Testing last changes outside prod", clock.instant());
    jobs.addAll(testJobs);
    // When every declared job has completed the change, the change itself is done and is cleared.
    if (steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob)
             .allMatch(job -> completedAt(application.change(), application, job).isPresent()))
        applications().lockIfPresent(id, lockedApplication -> applications().store(lockedApplication.withChange(Change.empty())));
});
return jobs;
} | .flatMap(JobStatus::lastSuccess).map(JobRun::at); | private List<Job> computeReadyJobs(ApplicationId id) {
List<Job> jobs = new ArrayList<>();
applications().get(id).ifPresent(application -> {
    // An empty deployment spec implicitly means a single system test step.
    List<Step> steps = application.deploymentSpec().steps().isEmpty()
            ? singletonList(new DeploymentSpec.DeclaredZone(test))
            : application.deploymentSpec().steps();
    // Walk only production (and zone-less delay) steps here; test steps are handled by testJobsFor.
    List<Step> productionSteps = steps.stream().filter(step -> step.deploysTo(prod) || step.zones().isEmpty()).collect(toList());
    // The change first becomes available for production when staging test last succeeded.
    Optional<Instant> completedAt = application.deploymentJobs().statusOf(stagingTest)
            .flatMap(JobStatus::lastSuccess).map(JobRun::at);
    String reason = "New change available";
    List<Job> testJobs = null; // Set at most once: the tests needed before the first unverified production job.
    for (Step step : productionSteps) {
        Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
        // Group this step's jobs by when (if at all) they completed the current change.
        Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(application.change(), application, job)));
        if (jobsByCompletion.containsKey(empty())) {
            // Some jobs of this step are incomplete.
            for (JobType job : jobsByCompletion.get(empty())) {
                State target = targetFor(application, application.change(), deploymentFor(application, job));
                if (isVerified(application, target, job)) {
                    // Trigger the job when the previous step has completed.
                    if (completedAt.isPresent())
                        jobs.add(deploymentJob(application, target, application.change(), job, reason, completedAt.get(), stepJobs));
                }
                else if (testJobs == null) {
                    // Not verified: request the missing test jobs — unless production was already
                    // triggered on this target, in which case no further tests are needed.
                    if ( ! alreadyTriggered(application, target))
                        testJobs = testJobsFor(application, target, "Testing deployment for " + job.jobName(), completedAt.orElse(clock.instant()));
                    else
                        testJobs = emptyList();
                }
            }
        }
        else {
            // All jobs of this step have completed, so the change may proceed to the next step.
            if (stepJobs.isEmpty()) {
                // A delay step: completion is postponed by the delay, and cleared if it has not yet passed.
                Duration delay = ((DeploymentSpec.Delay) step).duration();
                completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                reason += " after a delay of " + delay;
            }
            else {
                // The next step becomes available when the last job of this step completed.
                completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
                reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
            }
        }
    }
    // If no production job needed verification, still run tests for the latest change outside prod.
    if (testJobs == null)
        testJobs = testJobsFor(application, targetFor(application, application.change(), empty()), "Testing last changes outside prod", clock.instant());
    jobs.addAll(testJobs);
    // When every declared job has completed the change, the change itself is done and is cleared.
    if (steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob)
             .allMatch(job -> completedAt(application.change(), application, job).isPresent()))
        applications().lockIfPresent(id, lockedApplication -> applications().store(lockedApplication.withChange(Change.empty())));
});
return jobs;
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.order = new DeploymentOrder(controller::system);
this.buildService = buildService;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*/
public void notifyOfCompletion(JobReport report) {
log.log(LogLevel.DEBUG, String.format("Got notified of %s for %s of %s (%d).",
report.jobError().map(JobError::toString).orElse("success"),
report.jobType(),
report.applicationId(),
report.projectId()));
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(OptionalLong.of(report.projectId()));
if (report.jobType() == component && report.success()) {
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion));
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
applications().store(application);
});
}
/**
* Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
*
* Only one job is triggered each run for test jobs, since their environments have limited capacity.
*/
public long triggerReadyJobs() {
return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
.entrySet().stream()
.flatMap(entry -> (entry.getKey()
? entry.getValue().stream()
.sorted(comparing(Job::isRetry)
.thenComparing(Job::applicationUpgrade)
.reversed()
.thenComparing(Job::availableSince))
.collect(groupingBy(Job::jobType))
: entry.getValue().stream()
.collect(groupingBy(Job::applicationId)))
.values().stream()
.map(jobs -> (Supplier<Long>) jobs.stream()
.filter(job -> canTrigger(job) && trigger(job))
.limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
.parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
* Attempts to trigger the given job for the given application and returns the outcome.
*
* If the build service can not find the given job, or claims it is illegal to trigger it,
* the project id is removed from the application owning the job, to prevent further trigger attempts.
*/
public boolean trigger(Job job) {
log.log(LogLevel.INFO, String.format("Attempting to trigger %s: %s (%s)", job, job.reason, job.target));
try {
buildService.trigger(job);
applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(
job.jobType, new JobRun(-1, job.target.targetPlatform, job.target.targetApplication, job.reason, clock.instant()))));
return true;
}
catch (RuntimeException e) {
log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
applications().lockOrThrow(job.applicationId(), application ->
applications().store(application.withProjectId(OptionalLong.empty())));
return false;
}
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already has an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
});
}
public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
return computeReadyJobs().collect(groupingBy(Job::jobType));
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps. */
private Stream<Job> computeReadyJobs() {
return ApplicationList.from(applications().asList())
.notPullRequest()
.withProjectId()
.deploying()
.idList().stream()
.map(this::computeReadyJobs)
.flatMap(List::stream);
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
public boolean isRunning(Application application, JobType jobType) {
return ! application.deploymentJobs().statusOf(jobType)
.flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))).orElse(false)
&& buildService.isRunning(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
}
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType) {
Application application = applications().require(applicationId);
if (jobType == component) {
buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
return singletonList(component);
}
State target = targetFor(application, application.change(), deploymentFor(application, jobType));
String reason = ">:o:< Triggered by force! (-o-) |-o-| (=oo=)";
if (isVerified(application, target, jobType)) {
trigger(deploymentJob(application, target, application.change(), jobType, reason, clock.instant(), Collections.emptySet()));
return singletonList(jobType);
}
List<Job> testJobs = testJobsFor(application, target, reason, clock.instant());
testJobs.forEach(this::trigger);
return testJobs.stream().map(Job::jobType).collect(toList());
}
private Job deploymentJob(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
.filter(JobError.outOfCapacity::equals).isPresent();
if (isRetry) reason += "; retrying on out of capacity";
return new Job(application, target, jobType, reason, availableSince, concurrentlyWith, isRetry, change.application().isPresent());
}
private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::version), change.platform())
.orElse(application.oldestDeployedPlatform()
.orElse(controller.systemVersion()));
}
private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::applicationVersion), change.application())
.orElse(application.oldestDeployedApplication()
.orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().applicationVersion()));
}
/** Returns the greater of the two given values, or whichever is present, or empty if neither is. */
private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
    if ( ! o1.isPresent()) return o2;
    if ( ! o2.isPresent()) return o1;
    return o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
}
/**
* Finds the next step to trigger for the given application, if any, and returns these as a list.
*/
private List<Job> testJobsFor(Application application, State target, String reason, Instant availableSince) {
List<Step> steps = application.deploymentSpec().steps();
if (steps.isEmpty()) steps = singletonList(new DeploymentSpec.DeclaredZone(test));
List<Job> jobs = new ArrayList<>();
for (Step step : steps.stream().filter(step -> step.deploysTo(test) || step.deploysTo(staging)).collect(toList())) {
for (JobType jobType : step.zones().stream().map(order::toJob).collect(toList())) {
Optional<JobRun> completion = successOn(application, jobType, target);
if (completion.isPresent())
availableSince = completion.get().at();
else if (isVerified(application, target, jobType))
jobs.add(deploymentJob(application, target, application.change(), jobType, reason, availableSince, emptySet()));
}
}
return jobs;
}
private boolean isVerified(Application application, State state, JobType jobType) {
if (jobType.environment() == staging)
return successOn(application, systemTest, state).isPresent();
if (jobType.environment() == prod)
return successOn(application, stagingTest, state).isPresent()
|| ! JobList.from(application).production()
.lastTriggered().on(state.targetPlatform)
.lastTriggered().on(state.targetApplication)
.isEmpty();
return true;
}
/**
* Returns the instant when the given change is complete for the given application for the given job.
*
* Any job is complete if the given change is already successful on that job.
* A production job is also considered complete if its current change is strictly dominated by what
* is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
* part is a downgrade, regardless of the status of the job.
*/
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
State target = targetFor(application, change, deploymentFor(application, jobType));
Optional<JobRun> lastSuccess = successOn(application, jobType, target);
if (lastSuccess.isPresent() || ! jobType.isProduction())
return lastSuccess.map(JobRun::at);
return deploymentFor(application, jobType)
.filter(deployment -> ! ( change.upgrades(deployment.version())
|| change.upgrades(deployment.applicationVersion()))
&& ( change.downgrades(deployment.version())
|| change.downgrades(deployment.applicationVersion())))
.map(Deployment::at);
}
private Optional<JobRun> successOn(Application application, JobType jobType, State target) {
return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
.filter(last -> target.targetPlatform.equals(last.version())
&& target.targetApplication.equals(last.applicationVersion()));
}
private boolean canTrigger(Job job) {
Application application = applications().require(job.applicationId());
if (isRunning(application, job.jobType))
return false;
if ( ! job.jobType.isProduction())
return true;
if ( ! job.concurrentlyWith.containsAll(runningProductionJobsFor(application)))
return false;
if ( ! application.changeAt(clock.instant()).isPresent())
return false;
return true;
}
private List<JobType> runningProductionJobsFor(Application application) {
return application.deploymentJobs().jobStatus().keySet().parallelStream()
.filter(job -> job.isProduction())
.filter(job -> isRunning(application, job))
.collect(toList());
}
private ApplicationController applications() {
return controller.applications();
}
private boolean acceptNewApplicationVersion(LockedApplication application) {
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
return ! application.changeAt(clock.instant()).platform().isPresent();
}
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
private State targetFor(Application application, Change change, Optional<Deployment> deployment) {
return new State(targetPlatform(application, change, deployment),
targetApplication(application, change, deployment),
deployment.map(Deployment::version),
deployment.map(Deployment::applicationVersion));
}
private static class Job extends BuildJob {
private final JobType jobType;
private final String reason;
private final Instant availableSince;
private final Collection<JobType> concurrentlyWith;
private final boolean isRetry;
private final boolean isApplicationUpgrade;
private final State target;
private Job(Application application, State target, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry, boolean isApplicationUpgrade) {
super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
this.jobType = jobType;
this.availableSince = availableSince;
this.concurrentlyWith = concurrentlyWith;
this.reason = reason;
this.isRetry = isRetry;
this.isApplicationUpgrade = isApplicationUpgrade;
this.target = target;
}
JobType jobType() { return jobType; }
Instant availableSince() { return availableSince; }
boolean isRetry() { return isRetry; }
boolean applicationUpgrade() { return isApplicationUpgrade; }
}
private static class State {
private final Version targetPlatform;
private final ApplicationVersion targetApplication;
private final Optional<Version> sourcePlatform;
private final Optional<ApplicationVersion> sourceApplication;
public State(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
this.targetPlatform = targetPlatform;
this.targetApplication = targetApplication;
this.sourcePlatform = sourcePlatform;
this.sourceApplication = sourceApplication;
}
@Override
public String toString() {
return String.format("platform %s %s, application %s %s",
targetPlatform,
sourcePlatform.map(v -> "(from " + v + ")").orElse(""),
targetApplication.id(),
sourceApplication.map(v -> "(from " + v.id() + ")").orElse(""));
}
}
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

private final Controller controller;
// Injected clock, so time-dependent logic (delays, change windows) is testable.
private final Clock clock;
// Maps deployment-spec zones to job types and defines their ordering.
private final DeploymentOrder order;
// The external service which actually runs the build/deployment jobs.
private final BuildService buildService;
/**
 * Creates a deployment trigger.
 *
 * All arguments must be non-null; {@code curator} is required by the signature
 * but not retained by this class.
 */
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
    Objects.requireNonNull(controller, "controller cannot be null");
    Objects.requireNonNull(curator, "curator cannot be null");
    Objects.requireNonNull(clock, "clock cannot be null");
    // Consistency fix: buildService was the only argument accepted unchecked, which
    // deferred the NullPointerException to the first trigger attempt.
    Objects.requireNonNull(buildService, "buildService cannot be null");
    this.controller = controller;
    this.clock = clock;
    this.order = new DeploymentOrder(controller::system);
    this.buildService = buildService;
}
/** Returns the helper which maps deployment-spec zones to jobs and defines their order. */
public DeploymentOrder deploymentOrder() {
    return order;
}
/**
 * Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
 */
public void notifyOfCompletion(JobReport report) {
    log.log(LogLevel.DEBUG, String.format("Got notified of %s for %s of %s (%d).",
                                          report.jobError().map(JobError::toString).orElse("success"),
                                          report.jobType(),
                                          report.applicationId(),
                                          report.projectId()));
    if ( ! applications().get(report.applicationId()).isPresent()) {
        log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                  "': Unknown application '" + report.applicationId() + "'");
        return;
    }
    applications().lockOrThrow(report.applicationId(), application -> {
        // Reports without a source revision yield an unknown application version.
        ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
                                                      .orElse(ApplicationVersion.unknown);
        application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
        application = application.withProjectId(OptionalLong.of(report.projectId()));
        // A successful component (build) job either starts rolling out the new application
        // version, or parks it as the outstanding change if another change should finish first.
        if (report.jobType() == component && report.success()) {
            if (acceptNewApplicationVersion(application))
                application = application.withChange(application.change().with(applicationVersion));
            else
                application = application.withOutstandingChange(Change.of(applicationVersion));
        }
        applications().store(application);
    });
}
/**
 * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
 *
 * Only one job is triggered each run for test jobs, since their environments have limited capacity.
 */
public long triggerReadyJobs() {
    // Partition ready jobs into test jobs (key: true) and other jobs (key: false):
    //  - test jobs are sorted so that retries and application changes go first, then
    //    grouped by job type, and at most ONE job per type is triggered (limit 1);
    //  - other jobs are grouped per application, with no limit.
    // Each group yields a Supplier counting its triggered jobs; groups run in parallel
    // and the counts are summed.
    return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
                             .entrySet().stream()
                             .flatMap(entry -> (entry.getKey()
                                     ? entry.getValue().stream()
                                            .sorted(comparing(Job::isRetry)
                                                            .thenComparing(Job::applicationUpgrade)
                                                            .reversed()
                                                            .thenComparing(Job::availableSince))
                                            .collect(groupingBy(Job::jobType))
                                     : entry.getValue().stream()
                                            .collect(groupingBy(Job::applicationId)))
                                     .values().stream()
                                     .map(jobs -> (Supplier<Long>) jobs.stream()
                                                                       .filter(job -> canTrigger(job) && trigger(job))
                                                                       .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                             .parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
 * Attempts to trigger the given job for the given application and returns the outcome.
 *
 * If the build service can not find the given job, or claims it is illegal to trigger it,
 * the project id is removed from the application owning the job, to prevent further trigger attempts.
 */
public boolean trigger(Job job) {
    log.log(LogLevel.INFO, String.format("Attempting to trigger %s: %s (%s)", job, job.reason, job.target));
    try {
        buildService.trigger(job);
        // Record the triggering on the application.
        // NOTE(review): the -1 appears to be a "not yet known" sentinel for the run's first
        // field — confirm against the JobRun constructor.
        applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(
                job.jobType, new JobRun(-1, job.target.targetPlatform, job.target.targetApplication, job.reason, clock.instant()))));
        return true;
    }
    catch (RuntimeException e) {
        log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
        // These exception types are taken to mean the job is unknown to the build service,
        // or not triggerable: forget the project id so we stop retrying.
        if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
            applications().lockOrThrow(job.applicationId(), application ->
                    applications().store(application.withProjectId(OptionalLong.empty())));
        return false;
    }
}
/**
 * Triggers a change of this application
 *
 * @param applicationId the application to trigger
 * @throws IllegalArgumentException if this application already has an ongoing change
 *                                  and no failing jobs (a failing change may be replaced)
 */
public void triggerChange(ApplicationId applicationId, Change change) {
    applications().lockOrThrow(applicationId, application -> {
        if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
            throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                               application.change() + " is already in progress");
        application = application.withChange(change);
        // Starting an application change consumes any outstanding one.
        if (change.application().isPresent())
            application = application.withOutstandingChange(Change.empty());
        applications().store(application);
    });
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
    applications().lockOrThrow(applicationId, application -> {
        // Keep only the application part of the current change, when asked to; the
        // platform part is always dropped.
        Change remaining = Change.empty();
        if (keepApplicationChange && application.change().application().isPresent())
            remaining = Change.of(application.change().application().get());
        applications().store(application.withChange(remaining));
    });
}
/** Returns the jobs which are ready to run, grouped by their job type. */
public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
    return computeReadyJobs().collect(groupingBy(Job::jobType));
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps. */
private Stream<Job> computeReadyJobs() {
    // Only applications with a project id and an ongoing change — excluding pull-request
    // instances — can have jobs ready to trigger.
    return ApplicationList.from(applications().asList())
                          .notPullRequest()
                          .withProjectId()
                          .deploying()
                          .idList().stream()
                          .map(this::computeReadyJobs)
                          .flatMap(List::stream);
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
public boolean isRunning(Application application, JobType jobType) {
    // NOTE(review): lastTriggered().get() assumes a lastCompleted run implies a lastTriggered
    // run exists — confirm this invariant is guaranteed by JobStatus.
    return ! application.deploymentJobs().statusOf(jobType)
                        .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))).orElse(false)
           && buildService.isRunning(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
}
/**
 * Forcibly triggers the given job for the given application, bypassing the readiness computation.
 *
 * If the target is not yet verified by tests, the verifying test jobs are triggered instead.
 *
 * @return the job types which were actually triggered
 */
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType) {
    Application application = applications().require(applicationId);
    if (jobType == component) {
        buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
        return singletonList(component);
    }
    State target = targetFor(application, application.change(), deploymentFor(application, jobType));
    String reason = ">:o:< Triggered by force! (-o-) |-o-| (=oo=)";
    if (isVerified(application, target, jobType)) {
        trigger(deploymentJob(application, target, application.change(), jobType, reason, clock.instant(), Collections.emptySet()));
        return singletonList(jobType);
    }
    // Not verified: trigger the test jobs which will verify this target.
    List<Job> testJobs = testJobsFor(application, target, reason, clock.instant());
    testJobs.forEach(this::trigger);
    return testJobs.stream().map(Job::jobType).collect(toList());
}
/** Creates a Job for the given deployment job, marking it as a retry when the last run failed with out of capacity. */
private Job deploymentJob(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
    boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
                                 .filter(JobError.outOfCapacity::equals).isPresent();
    if (isRetry) reason += "; retrying on out of capacity";
    return new Job(application, target, jobType, reason, availableSince, concurrentlyWith, isRetry, change.application().isPresent());
}
/** The platform version to deploy: the newest of the deployed and the changing version, with fallbacks when neither exists. */
private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
    Optional<Version> target = max(deployment.map(Deployment::version), change.platform());
    // Fallbacks: the oldest platform deployed anywhere, and finally the system version.
    // (Evaluated eagerly, as in Optional::orElse.)
    Version fallback = application.oldestDeployedPlatform()
                                  .orElse(controller.systemVersion());
    return target.orElse(fallback);
}
/** The application version to deploy: the newest of the deployed and the changing version, with fallbacks when neither exists. */
private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
    // NOTE(review): the final fallback assumes a successful component run exists when
    // nothing is deployed anywhere — confirm callers guarantee this, or get() throws.
    return max(deployment.map(Deployment::applicationVersion), change.application())
            .orElse(application.oldestDeployedApplication()
                               .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().applicationVersion()));
}
/** Returns the greater of the two given values, preferring the first on ties, or whichever is present, or empty. */
private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
    if ( ! o1.isPresent()) return o2;
    if ( ! o2.isPresent()) return o1;
    return o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
}
/**
 * Finds the next step to trigger for the given application, if any, and returns these as a list.
 */
private List<Job> testJobsFor(Application application, State target, String reason, Instant availableSince) {
    List<Step> steps = application.deploymentSpec().steps();
    // An empty deployment spec implicitly means a single system test.
    if (steps.isEmpty()) steps = singletonList(new DeploymentSpec.DeclaredZone(test));
    List<Job> jobs = new ArrayList<>();
    for (Step step : steps.stream().filter(step -> step.deploysTo(test) || step.deploysTo(staging)).collect(toList())) {
        for (JobType jobType : step.zones().stream().map(order::toJob).collect(toList())) {
            Optional<JobRun> completion = successOn(application, jobType, target);
            // Already-successful test jobs push the available-since instant forward for
            // subsequent jobs; incomplete ones are added, provided they are verified.
            if (completion.isPresent())
                availableSince = completion.get().at();
            else if (isVerified(application, target, jobType))
                jobs.add(deploymentJob(application, target, application.change(), jobType, reason, availableSince, emptySet()));
        }
    }
    return jobs;
}
/** Returns whether the prerequisite tests of the given job have succeeded on the given state. */
private boolean isVerified(Application application, State state, JobType jobType) {
    // Staging requires a system test success on the same versions; production requires a
    // staging test success, or that some production job was already triggered on them.
    if (jobType.environment() == staging)
        return successOn(application, systemTest, state).isPresent();
    if (jobType.environment() == prod)
        return successOn(application, stagingTest, state).isPresent()
               || ! JobList.from(application).production()
                           .lastTriggered().on(state.targetPlatform)
                           .lastTriggered().on(state.targetApplication)
                           .isEmpty();
    // Test environments have no prerequisites.
    return true;
}
/** Returns the instant of the most recent success among system and staging tests on the given state, if any. */
private Optional<Instant> testedAt(Application application, State target) {
    return max(successOn(application, systemTest, target).map(JobRun::at),
               successOn(application, stagingTest, target).map(JobRun::at));
}
/** Returns whether some production job was last triggered on exactly the given target versions. */
private boolean alreadyTriggered(Application application, State target) {
    boolean noneTriggeredOnTarget = JobList.from(application).production()
                                           .lastTriggered().on(target.targetPlatform)
                                           .lastTriggered().on(target.targetApplication)
                                           .isEmpty();
    return ! noneTriggeredOnTarget;
}
/**
 * Returns the instant when the given change is complete for the given application for the given job.
 *
 * Any job is complete if the given change is already successful on that job.
 * A production job is also considered complete if its current change is strictly dominated by what
 * is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
 * part is a downgrade, regardless of the status of the job.
 */
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
    State target = targetFor(application, change, deploymentFor(application, jobType));
    Optional<JobRun> lastSuccess = successOn(application, jobType, target);
    if (lastSuccess.isPresent() || ! jobType.isProduction())
        return lastSuccess.map(JobRun::at);
    // Production job without a success on the target: complete only when the existing
    // deployment strictly dominates the change (no upgrades, at least one downgrade).
    return deploymentFor(application, jobType)
            .filter(deployment ->    ! (   change.upgrades(deployment.version())
                                        || change.upgrades(deployment.applicationVersion()))
                                  && (   change.downgrades(deployment.version())
                                      || change.downgrades(deployment.applicationVersion())))
            .map(Deployment::at);
}
/** Returns the last success of the given job, provided it ran on exactly the target platform and application versions. */
private Optional<JobRun> successOn(Application application, JobType jobType, State target) {
    return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                      .filter(last ->    target.targetPlatform.equals(last.version())
                                      && target.targetApplication.equals(last.applicationVersion()));
}
/** Returns whether the given job may be triggered now: it is not already running, and production gates are open. */
private boolean canTrigger(Job job) {
    Application application = applications().require(job.applicationId());
    if (isRunning(application, job.jobType))
        return false;
    if ( ! job.jobType.isProduction())
        return true;
    // Production jobs additionally require that every currently running production job is
    // allowed to run concurrently with this one, and that the change is allowed out now.
    return job.concurrentlyWith.containsAll(runningProductionJobsFor(application))
           && application.changeAt(clock.instant()).isPresent();
}
/** Returns the production jobs of the given application which are currently running. */
private List<JobType> runningProductionJobsFor(Application application) {
    // Use a sequential stream here: isRunning may call out to the build service, and
    // blocking I/O on the common fork-join pool (via parallelStream) can starve other
    // parallel work. The resulting elements are the same either way.
    return application.deploymentJobs().jobStatus().keySet().stream()
                      .filter(JobType::isProduction)
                      .filter(job -> isRunning(application, job))
                      .collect(toList());
}
/** Convenience accessor for the application controller. */
private ApplicationController applications() {
    return controller.applications();
}
/** Returns whether a new application version should start rolling out now, rather than be kept as outstanding. */
private boolean acceptNewApplicationVersion(LockedApplication application) {
    if (application.change().application().isPresent()) return true; // Replaces a change of the same kind.
    if (application.deploymentJobs().hasFailures()) return true;     // A new version may fix the failure.
    // Otherwise, accept only when no platform change is currently rolling out.
    return ! application.changeAt(clock.instant()).platform().isPresent();
}
/** Returns the deployment of the given application in the zone of the given job, if one exists. */
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
    // NOTE(review): zone(...).get() assumes every job type passed here has a zone in this
    // system — confirm callers never pass the component job.
    return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
/** Computes the source and target platform and application versions for a job, from the change and current deployment. */
private State targetFor(Application application, Change change, Optional<Deployment> deployment) {
    return new State(targetPlatform(application, change, deployment),
                     targetApplication(application, change, deployment),
                     deployment.map(Deployment::version),
                     deployment.map(Deployment::applicationVersion));
}
/**
 * A job that is ready to be triggered: the build-service job plus the metadata
 * used to order, gate and explain the triggering.
 *
 * Extends BuildJob with the application id, project id and job name, so it can be
 * handed directly to the build service.
 */
private static class Job extends BuildJob {

    private final JobType jobType;
    // Human-readable explanation of why this job is triggered; stored with the job run.
    private final String reason;
    // When the change this job carries became available; test jobs are ordered by this.
    private final Instant availableSince;
    // Production jobs which may run concurrently with this one (the jobs of the same step).
    private final Collection<JobType> concurrentlyWith;
    // True when the last run of this job failed with outOfCapacity, i.e., this is a retry.
    private final boolean isRetry;
    // True when the change being deployed includes an application (revision) change.
    private final boolean isApplicationUpgrade;
    // The versions this job should converge on, and those it moves away from.
    private final State target;

    private Job(Application application, State target, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry, boolean isApplicationUpgrade) {
        super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
        this.jobType = jobType;
        this.availableSince = availableSince;
        this.concurrentlyWith = concurrentlyWith;
        this.reason = reason;
        this.isRetry = isRetry;
        this.isApplicationUpgrade = isApplicationUpgrade;
        this.target = target;
    }

    JobType jobType() { return jobType; }
    Instant availableSince() { return availableSince; }
    boolean isRetry() { return isRetry; }
    boolean applicationUpgrade() { return isApplicationUpgrade; }

}
/**
 * An immutable snapshot of the versions a job run moves between: the platform and
 * application versions to deploy, and, when a deployment already exists in the zone,
 * the versions it currently runs.
 */
private static class State {

    private final Version targetPlatform;
    private final ApplicationVersion targetApplication;
    // Empty when there is no current deployment in the zone (first deployment).
    private final Optional<Version> sourcePlatform;
    private final Optional<ApplicationVersion> sourceApplication;

    public State(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
        this.targetPlatform = targetPlatform;
        this.targetApplication = targetApplication;
        this.sourcePlatform = sourcePlatform;
        this.sourceApplication = sourceApplication;
    }

    @Override
    public String toString() {
        return String.format("platform %s %s, application %s %s",
                             targetPlatform,
                             sourcePlatform.map(v -> "(from " + v + ")").orElse(""),
                             targetApplication.id(),
                             sourceApplication.map(v -> "(from " + v.id() + ")").orElse(""));
    }

}
} |
Otherwise, an application with two upgrade targets (due to previous failed deployments, or, later, due to an aborted change) would start tests for the other target immediately after completing them for the first, disregarding the delay. | private List<Job> computeReadyJobs(ApplicationId id) {
List<Job> jobs = new ArrayList<>();
applications().get(id).ifPresent(application -> {
List<Step> steps = application.deploymentSpec().steps().isEmpty()
? singletonList(new DeploymentSpec.DeclaredZone(test))
: application.deploymentSpec().steps();
List<Step> productionSteps = steps.stream().filter(step -> step.deploysTo(prod) || step.zones().isEmpty()).collect(toList());
Optional<Instant> completedAt = application.deploymentJobs().statusOf(stagingTest)
.flatMap(JobStatus::lastSuccess).map(JobRun::at);
String reason = "New change available";
List<Job> testJobs = null;
for (Step step : productionSteps) {
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(application.change(), application, job)));
if (jobsByCompletion.containsKey(empty())) {
for (JobType job : jobsByCompletion.get(empty())) {
State target = targetFor(application, application.change(), deploymentFor(application, job));
if (isVerified(application, target, job)) {
if (completedAt.isPresent())
jobs.add(deploymentJob(application, target, application.change(), job, reason, completedAt.get(), stepJobs));
}
else if (testJobs == null) {
if ( ! alreadyTriggered(application, target))
testJobs = testJobsFor(application, target, "Testing deployment for " + job.jobName(), completedAt.orElse(clock.instant()));
}
}
}
else {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
}
if (testJobs == null)
testJobs = testJobsFor(application, targetFor(application, application.change(), empty()), "Testing last changes outside prod", clock.instant());
jobs.addAll(testJobs);
if (steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob)
.allMatch(job -> completedAt(application.change(), application, job).isPresent()))
applications().lockIfPresent(id, lockedApplication -> applications().store(lockedApplication.withChange(Change.empty())));
});
return jobs;
} | if ( ! alreadyTriggered(application, target)) | private List<Job> computeReadyJobs(ApplicationId id) {
List<Job> jobs = new ArrayList<>();
applications().get(id).ifPresent(application -> {
List<Step> steps = application.deploymentSpec().steps().isEmpty()
? singletonList(new DeploymentSpec.DeclaredZone(test))
: application.deploymentSpec().steps();
List<Step> productionSteps = steps.stream().filter(step -> step.deploysTo(prod) || step.zones().isEmpty()).collect(toList());
Optional<Instant> completedAt = application.deploymentJobs().statusOf(stagingTest)
.flatMap(JobStatus::lastSuccess).map(JobRun::at);
String reason = "New change available";
List<Job> testJobs = null;
for (Step step : productionSteps) {
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(application.change(), application, job)));
if (jobsByCompletion.containsKey(empty())) {
for (JobType job : jobsByCompletion.get(empty())) {
State target = targetFor(application, application.change(), deploymentFor(application, job));
if (isVerified(application, target, job)) {
if (completedAt.isPresent())
jobs.add(deploymentJob(application, target, application.change(), job, reason, completedAt.get(), stepJobs));
}
else if (testJobs == null) {
if ( ! alreadyTriggered(application, target))
testJobs = testJobsFor(application, target, "Testing deployment for " + job.jobName(), completedAt.orElse(clock.instant()));
else
testJobs = emptyList();
}
}
}
else {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
}
if (testJobs == null)
testJobs = testJobsFor(application, targetFor(application, application.change(), empty()), "Testing last changes outside prod", clock.instant());
jobs.addAll(testJobs);
if (steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob)
.allMatch(job -> completedAt(application.change(), application, job).isPresent()))
applications().lockIfPresent(id, lockedApplication -> applications().store(lockedApplication.withChange(Change.empty())));
});
return jobs;
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
/**
 * Creates a deployment trigger.
 *
 * All arguments must be non-null; {@code curator} is required by the signature
 * but not retained by this class.
 */
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
    Objects.requireNonNull(controller, "controller cannot be null");
    Objects.requireNonNull(curator, "curator cannot be null");
    Objects.requireNonNull(clock, "clock cannot be null");
    // Consistency fix: buildService was the only argument accepted unchecked, which
    // deferred the NullPointerException to the first trigger attempt.
    Objects.requireNonNull(buildService, "buildService cannot be null");
    this.controller = controller;
    this.clock = clock;
    this.order = new DeploymentOrder(controller::system);
    this.buildService = buildService;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*/
public void notifyOfCompletion(JobReport report) {
log.log(LogLevel.DEBUG, String.format("Got notified of %s for %s of %s (%d).",
report.jobError().map(JobError::toString).orElse("success"),
report.jobType(),
report.applicationId(),
report.projectId()));
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
.orElse(ApplicationVersion.unknown);
application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
application = application.withProjectId(OptionalLong.of(report.projectId()));
if (report.jobType() == component && report.success()) {
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion));
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
applications().store(application);
});
}
/**
* Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
*
* Only one job is triggered each run for test jobs, since their environments have limited capacity.
*/
public long triggerReadyJobs() {
return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
.entrySet().stream()
.flatMap(entry -> (entry.getKey()
? entry.getValue().stream()
.sorted(comparing(Job::isRetry)
.thenComparing(Job::applicationUpgrade)
.reversed()
.thenComparing(Job::availableSince))
.collect(groupingBy(Job::jobType))
: entry.getValue().stream()
.collect(groupingBy(Job::applicationId)))
.values().stream()
.map(jobs -> (Supplier<Long>) jobs.stream()
.filter(job -> canTrigger(job) && trigger(job))
.limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
.parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
* Attempts to trigger the given job for the given application and returns the outcome.
*
* If the build service can not find the given job, or claims it is illegal to trigger it,
* the project id is removed from the application owning the job, to prevent further trigger attemps.
*/
public boolean trigger(Job job) {
log.log(LogLevel.INFO, String.format("Attempting to trigger %s: %s (%s)", job, job.reason, job.target));
try {
buildService.trigger(job);
applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(
job.jobType, new JobRun(-1, job.target.targetPlatform, job.target.targetApplication, job.reason, clock.instant()))));
return true;
}
catch (RuntimeException e) {
log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
applications().lockOrThrow(job.applicationId(), application ->
applications().store(application.withProjectId(OptionalLong.empty())));
return false;
}
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already has an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
});
}
public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
return computeReadyJobs().collect(groupingBy(Job::jobType));
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps. */
private Stream<Job> computeReadyJobs() {
return ApplicationList.from(applications().asList())
.notPullRequest()
.withProjectId()
.deploying()
.idList().stream()
.map(this::computeReadyJobs)
.flatMap(List::stream);
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service othewise. */
public boolean isRunning(Application application, JobType jobType) {
return ! application.deploymentJobs().statusOf(jobType)
.flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))).orElse(false)
&& buildService.isRunning(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
}
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType) {
Application application = applications().require(applicationId);
if (jobType == component) {
buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
return singletonList(component);
}
State target = targetFor(application, application.change(), deploymentFor(application, jobType));
String reason = ">:o:< Triggered by force! (-o-) |-o-| (=oo=)";
if (isVerified(application, target, jobType)) {
trigger(deploymentJob(application, target, application.change(), jobType, reason, clock.instant(), Collections.emptySet()));
return singletonList(jobType);
}
List<Job> testJobs = testJobsFor(application, target, reason, clock.instant());
testJobs.forEach(this::trigger);
return testJobs.stream().map(Job::jobType).collect(toList());
}
private Job deploymentJob(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
.filter(JobError.outOfCapacity::equals).isPresent();
if (isRetry) reason += "; retrying on out of capacity";
return new Job(application, target, jobType, reason, availableSince, concurrentlyWith, isRetry, change.application().isPresent());
}
private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::version), change.platform())
.orElse(application.oldestDeployedPlatform()
.orElse(controller.systemVersion()));
}
private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::applicationVersion), change.application())
.orElse(application.oldestDeployedApplication()
.orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().applicationVersion()));
}
private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
}
/**
* Finds the next step to trigger for the given application, if any, and returns these as a list.
*/
private List<Job> testJobsFor(Application application, State target, String reason, Instant availableSince) {
List<Step> steps = application.deploymentSpec().steps();
if (steps.isEmpty()) steps = singletonList(new DeploymentSpec.DeclaredZone(test));
List<Job> jobs = new ArrayList<>();
for (Step step : steps.stream().filter(step -> step.deploysTo(test) || step.deploysTo(staging)).collect(toList())) {
for (JobType jobType : step.zones().stream().map(order::toJob).collect(toList())) {
Optional<JobRun> completion = successOn(application, jobType, target);
if (completion.isPresent())
availableSince = completion.get().at();
else if (isVerified(application, target, jobType))
jobs.add(deploymentJob(application, target, application.change(), jobType, reason, availableSince, emptySet()));
}
}
return jobs;
}
private boolean isVerified(Application application, State state, JobType jobType) {
if (jobType.environment() == staging)
return successOn(application, systemTest, state).isPresent();
if (jobType.environment() == prod)
return successOn(application, stagingTest, state).isPresent()
|| ! JobList.from(application).production()
.lastTriggered().on(state.targetPlatform)
.lastTriggered().on(state.targetApplication)
.isEmpty();
return true;
}
/**
* Returns the instant when the given change is complete for the given application for the given job.
*
* Any job is complete if the given change is already successful on that job.
* A production job is also considered complete if its current change is strictly dominated by what
* is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
* part is a downgrade, regardless of the status of the job.
*/
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
State target = targetFor(application, change, deploymentFor(application, jobType));
Optional<JobRun> lastSuccess = successOn(application, jobType, target);
if (lastSuccess.isPresent() || ! jobType.isProduction())
return lastSuccess.map(JobRun::at);
return deploymentFor(application, jobType)
.filter(deployment -> ! ( change.upgrades(deployment.version())
|| change.upgrades(deployment.applicationVersion()))
&& ( change.downgrades(deployment.version())
|| change.downgrades(deployment.applicationVersion())))
.map(Deployment::at);
}
private Optional<JobRun> successOn(Application application, JobType jobType, State target) {
return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
.filter(last -> target.targetPlatform.equals(last.version())
&& target.targetApplication.equals(last.applicationVersion()));
}
private boolean canTrigger(Job job) {
Application application = applications().require(job.applicationId());
if (isRunning(application, job.jobType))
return false;
if ( ! job.jobType.isProduction())
return true;
if ( ! job.concurrentlyWith.containsAll(runningProductionJobsFor(application)))
return false;
if ( ! application.changeAt(clock.instant()).isPresent())
return false;
return true;
}
/** Returns the production jobs of the given application which are currently running. */
private List<JobType> runningProductionJobsFor(Application application) {
    // Use a sequential stream here: isRunning may call out to the build service, and
    // blocking I/O on the common fork-join pool (via parallelStream) can starve other
    // parallel work. The resulting elements are the same either way.
    return application.deploymentJobs().jobStatus().keySet().stream()
                      .filter(JobType::isProduction)
                      .filter(job -> isRunning(application, job))
                      .collect(toList());
}
private ApplicationController applications() {
return controller.applications();
}
private boolean acceptNewApplicationVersion(LockedApplication application) {
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
return ! application.changeAt(clock.instant()).platform().isPresent();
}
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
/** Computes the target (and, when deployed, current source) platform and application versions for the given change. */
private State targetFor(Application application, Change change, Optional<Deployment> deployment) {
    return new State(targetPlatform(application, change, deployment),
                     targetApplication(application, change, deployment),
                     deployment.map(Deployment::version),
                     deployment.map(Deployment::applicationVersion));
}
/** A concrete triggering of a job, carrying the information needed to dispatch it to the build service. */
private static class Job extends BuildJob {

    private final JobType jobType;
    private final String reason;                        // human readable cause of the triggering
    private final Instant availableSince;               // when this job became available — used for ordering
    private final Collection<JobType> concurrentlyWith; // production jobs which may keep running while this is triggered
    private final boolean isRetry;
    private final boolean isApplicationUpgrade;
    private final State target;                         // the platform and application versions to deploy

    private Job(Application application, State target, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry, boolean isApplicationUpgrade) {
        super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
        this.jobType = jobType;
        this.availableSince = availableSince;
        this.concurrentlyWith = concurrentlyWith;
        this.reason = reason;
        this.isRetry = isRetry;
        this.isApplicationUpgrade = isApplicationUpgrade;
        this.target = target;
    }

    JobType jobType() { return jobType; }
    Instant availableSince() { return availableSince; }
    boolean isRetry() { return isRetry; }
    boolean applicationUpgrade() { return isApplicationUpgrade; }

}
private static class State {
private final Version targetPlatform;
private final ApplicationVersion targetApplication;
private final Optional<Version> sourcePlatform;
private final Optional<ApplicationVersion> sourceApplication;
public State(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
this.targetPlatform = targetPlatform;
this.targetApplication = targetApplication;
this.sourcePlatform = sourcePlatform;
this.sourceApplication = sourceApplication;
}
@Override
public String toString() {
return String.format("platform %s %s, application %s %s",
targetPlatform,
sourcePlatform.map(v -> "(from " + v + ")").orElse(""),
targetApplication.id(),
sourceApplication.map(v -> "(from " + v.id() + ")").orElse(""));
}
}
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

private final Controller controller;
private final Clock clock;
// Maps deployment-spec zones to job types.
private final DeploymentOrder order;
private final BuildService buildService;
/**
 * Creates a deployment trigger backed by the given controller, build service and clock.
 *
 * @throws NullPointerException if any argument is null
 */
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
    Objects.requireNonNull(controller, "controller cannot be null");
    Objects.requireNonNull(curator, "curator cannot be null");
    Objects.requireNonNull(buildService, "buildService cannot be null"); // was unchecked, unlike the other arguments, but is dereferenced later
    Objects.requireNonNull(clock, "clock cannot be null");
    this.controller = controller;
    this.clock = clock;
    this.order = new DeploymentOrder(controller::system);
    this.buildService = buildService;
}
/** Returns the deployment ordering used by this trigger. */
public DeploymentOrder deploymentOrder() {
    return order;
}
/**
 * Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
 * Completions for unknown applications are logged and ignored.
 */
public void notifyOfCompletion(JobReport report) {
    log.log(LogLevel.DEBUG, String.format("Got notified of %s for %s of %s (%d).",
                                          report.jobError().map(JobError::toString).orElse("success"),
                                          report.jobType(),
                                          report.applicationId(),
                                          report.projectId()));
    if ( ! applications().get(report.applicationId()).isPresent()) {
        log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                  "': Unknown application '" + report.applicationId() + "'");
        return;
    }

    applications().lockOrThrow(report.applicationId(), application -> {
        // ApplicationVersion.unknown marks builds which cannot be tied to a source revision.
        ApplicationVersion applicationVersion = report.sourceRevision().map(sr -> ApplicationVersion.from(sr, report.buildNumber()))
                                                      .orElse(ApplicationVersion.unknown);
        application = application.withJobCompletion(report, applicationVersion, clock.instant(), controller);
        application = application.withProjectId(OptionalLong.of(report.projectId()));
        // A successful component build either becomes the current change, or is parked as outstanding.
        if (report.jobType() == component && report.success()) {
            if (acceptNewApplicationVersion(application))
                application = application.withChange(application.change().with(applicationVersion));
            else
                application = application.withOutstandingChange(Change.of(applicationVersion));
        }
        applications().store(application);
    });
}
/**
 * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
 *
 * Only one job is triggered each run for test jobs, since their environments have limited capacity.
 */
public long triggerReadyJobs() {
    // Partition the ready jobs into test jobs (key: true) and all other jobs (key: false).
    return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
            .entrySet().stream()
            .flatMap(entry -> (entry.getKey()
                    // Test jobs: retries and application upgrades first, then oldest availability, grouped per job type.
                    ? entry.getValue().stream()
                            .sorted(comparing(Job::isRetry)
                                            .thenComparing(Job::applicationUpgrade)
                                            .reversed()
                                            .thenComparing(Job::availableSince))
                            .collect(groupingBy(Job::jobType))
                    // Other jobs: grouped per application, no ordering or limit.
                    : entry.getValue().stream()
                            .collect(groupingBy(Job::applicationId)))
                    .values().stream()
                    // Wrap each group's triggering in a Supplier so the groups can be run in parallel below;
                    // the limit of 1 enforces the single-trigger rule for test jobs.
                    .map(jobs -> (Supplier<Long>) jobs.stream()
                            .filter(job -> canTrigger(job) && trigger(job))
                            .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
            .parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
 * Attempts to trigger the given job for the given application and returns the outcome.
 *
 * If the build service can not find the given job, or claims it is illegal to trigger it,
 * the project id is removed from the application owning the job, to prevent further trigger attempts.
 */
public boolean trigger(Job job) {
    log.log(LogLevel.INFO, String.format("Attempting to trigger %s: %s (%s)", job, job.reason, job.target));

    try {
        buildService.trigger(job);
        // Record the triggering on the application, so isRunning can compare last triggered with last completed.
        applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(
                job.jobType, new JobRun(-1, job.target.targetPlatform, job.target.targetApplication, job.reason, clock.instant()))));
        return true;
    }
    catch (RuntimeException e) {
        // Log with the throwable itself — string-concatenating e would discard the stack trace.
        log.log(LogLevel.WARNING, "Exception triggering " + job, e);
        if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
            applications().lockOrThrow(job.applicationId(), application ->
                    applications().store(application.withProjectId(OptionalLong.empty())));
        return false;
    }
}
/**
 * Triggers a change of this application
 *
 * @param applicationId the application to trigger
 * @param change the change to start rolling out
 * @throws IllegalArgumentException if this application already has an ongoing change
 */
public void triggerChange(ApplicationId applicationId, Change change) {
    applications().lockOrThrow(applicationId, application -> {
        // An existing change only blocks a new one while it is not failing.
        if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
            throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                               application.change() + " is already in progress");
        application = application.withChange(change);
        // A deliberately started application change consumes any outstanding one.
        if (change.application().isPresent())
            application = application.withOutstandingChange(Change.empty());
        applications().store(application);
    });
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
    applications().lockOrThrow(applicationId, application -> {
        // Keep only the application part of the change, and only when asked to; drop everything else.
        Change remaining = application.change().application()
                                      .filter(ignored -> keepApplicationChange)
                                      .map(Change::of)
                                      .orElse(Change.empty());
        applications().store(application.withChange(remaining));
    });
}
/** Returns the currently ready jobs, grouped by their job type. */
public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
    return computeReadyJobs().collect(groupingBy(Job::jobType));
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps. */
private Stream<Job> computeReadyJobs() {
    return ApplicationList.from(applications().asList())
                          .notPullRequest()
                          .withProjectId()
                          .deploying()
                          .idList().stream()
                          .map(this::computeReadyJobs) // per-application overload, defined elsewhere in this class
                          .flatMap(List::stream);
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
public boolean isRunning(Application application, JobType jobType) {
    // NOTE(review): lastTriggered().get() assumes a job with a lastCompleted run always has a lastTriggered run — TODO confirm invariant.
    return ! application.deploymentJobs().statusOf(jobType)
                        .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))).orElse(false)
           && buildService.isRunning(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
}
/**
 * Force-triggers the given job type for the given application and returns the job types actually triggered:
 * component goes straight to the build service; other jobs run directly when verified,
 * and otherwise the preceding test jobs are triggered instead.
 */
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType) {
    Application application = applications().require(applicationId);
    if (jobType == component) {
        buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
        return singletonList(component);
    }
    State target = targetFor(application, application.change(), deploymentFor(application, jobType));
    String reason = ">:o:< Triggered by force! (-o-) |-o-| (=oo=)";
    if (isVerified(application, target, jobType)) {
        trigger(deploymentJob(application, target, application.change(), jobType, reason, clock.instant(), Collections.emptySet()));
        return singletonList(jobType);
    }
    List<Job> testJobs = testJobsFor(application, target, reason, clock.instant());
    testJobs.forEach(this::trigger);
    return testJobs.stream().map(Job::jobType).collect(toList());
}
/** Creates a Job for the given parameters, marking it as a retry when the job last failed on out of capacity. */
private Job deploymentJob(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
    Optional<JobError> lastError = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError);
    boolean isRetry = lastError.filter(JobError.outOfCapacity::equals).isPresent();
    String fullReason = isRetry ? reason + "; retrying on out of capacity" : reason;
    return new Job(application, target, jobType, fullReason, availableSince, concurrentlyWith, isRetry, change.application().isPresent());
}
/** Returns the platform to deploy: the max of the deployed and the changing version, falling back to the oldest deployed platform, then the system version. */
private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
    return max(deployment.map(Deployment::version), change.platform())
            .orElse(application.oldestDeployedPlatform()
                               .orElse(controller.systemVersion()));
}
/** Returns the application version to deploy: the max of the deployed and the changing version, with fallbacks mirroring targetPlatform. */
private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
    // NOTE(review): the final fallback assumes a successful component run exists — .get() throws otherwise; TODO confirm.
    return max(deployment.map(Deployment::applicationVersion), change.application())
            .orElse(application.oldestDeployedApplication()
                               .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().applicationVersion()));
}
/** Returns the greater of the two optionals by natural order; an absent value loses to a present one. */
private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
    if ( ! o1.isPresent()) return o2;
    if ( ! o2.isPresent()) return o1;
    return o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
}
/**
 * Finds the next step to trigger for the given application, if any, and returns these as a list.
 * Already successful test steps advance availableSince instead of producing a job.
 */
private List<Job> testJobsFor(Application application, State target, String reason, Instant availableSince) {
    List<Step> steps = application.deploymentSpec().steps();
    // An empty spec implies a single, implicit test step.
    if (steps.isEmpty()) steps = singletonList(new DeploymentSpec.DeclaredZone(test));
    List<Job> jobs = new ArrayList<>();
    for (Step step : steps.stream().filter(step -> step.deploysTo(test) || step.deploysTo(staging)).collect(toList())) {
        for (JobType jobType : step.zones().stream().map(order::toJob).collect(toList())) {
            Optional<JobRun> completion = successOn(application, jobType, target);
            if (completion.isPresent())
                // Completed: following jobs are available from this completion instant.
                availableSince = completion.get().at();
            else if (isVerified(application, target, jobType))
                jobs.add(deploymentJob(application, target, application.change(), jobType, reason, availableSince, emptySet()));
        }
    }
    return jobs;
}
/**
 * Returns whether the given job is cleared to run against the given target:
 * staging requires a system test success; production requires a staging test success,
 * or that some production job was already last triggered on the same target versions.
 */
private boolean isVerified(Application application, State state, JobType jobType) {
    if (jobType.environment() == staging)
        return successOn(application, systemTest, state).isPresent();
    if (jobType.environment() == prod)
        return successOn(application, stagingTest, state).isPresent()
               || ! JobList.from(application).production()
                           .lastTriggered().on(state.targetPlatform)
                           .lastTriggered().on(state.targetApplication)
                           .isEmpty();
    return true; // other environments need no verification
}
/** Returns the later of the system and staging test successes on the given target, if either is present. */
private Optional<Instant> testedAt(Application application, State target) {
    return max(successOn(application, systemTest, target).map(JobRun::at),
               successOn(application, stagingTest, target).map(JobRun::at));
}
/** Returns whether some production job was already last triggered on exactly the target platform and application versions. */
private boolean alreadyTriggered(Application application, State target) {
    JobList onTarget = JobList.from(application).production()
                              .lastTriggered().on(target.targetPlatform)
                              .lastTriggered().on(target.targetApplication);
    return ! onTarget.isEmpty();
}
/**
 * Returns the instant when the given change is complete for the given application for the given job.
 *
 * Any job is complete if the given change is already successful on that job.
 * A production job is also considered complete if its current change is strictly dominated by what
 * is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
 * part is a downgrade, regardless of the status of the job.
 */
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
    State target = targetFor(application, change, deploymentFor(application, jobType));
    Optional<JobRun> lastSuccess = successOn(application, jobType, target);
    if (lastSuccess.isPresent() || ! jobType.isProduction())
        return lastSuccess.map(JobRun::at);

    // Production only: complete when the deployment strictly dominates the change.
    return deploymentFor(application, jobType)
            .filter(deployment -> ! ( change.upgrades(deployment.version())
                                   || change.upgrades(deployment.applicationVersion()))
                                  && ( change.downgrades(deployment.version())
                                    || change.downgrades(deployment.applicationVersion())))
            .map(Deployment::at);
}
/** Returns the last success of the given job on exactly the target platform and application versions, if any. */
private Optional<JobRun> successOn(Application application, JobType jobType, State target) {
    return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                      .filter(last -> target.targetPlatform.equals(last.version())
                                      && target.targetApplication.equals(last.applicationVersion()));
}
/** Returns whether the given job may be triggered now: not already running; production jobs also require compatible concurrency and an active change. */
private boolean canTrigger(Job job) {
    Application application = applications().require(job.applicationId());

    if (isRunning(application, job.jobType))
        return false;

    if ( ! job.jobType.isProduction())
        return true;

    // All currently running production jobs must be allowed to run alongside this one.
    if ( ! job.concurrentlyWith.containsAll(runningProductionJobsFor(application)))
        return false;

    if ( ! application.changeAt(clock.instant()).isPresent())
        return false;

    return true;
}
/** Lists the production jobs of the given application which are currently running. */
private List<JobType> runningProductionJobsFor(Application application) {
    return application.deploymentJobs().jobStatus().keySet().parallelStream()
                      .filter(job -> job.isProduction())
                      .filter(job -> isRunning(application, job))
                      .collect(toList());
}
/** Returns the application controller of this' owning controller. */
private ApplicationController applications() {
    return controller.applications();
}
/** Returns whether a newly built application version should become the current change, rather than be parked as outstanding. */
private boolean acceptNewApplicationVersion(LockedApplication application) {
    if (application.change().application().isPresent()) return true;
    if (application.deploymentJobs().hasFailures()) return true;
    return ! application.changeAt(clock.instant()).platform().isPresent();
}
/** Returns the deployment of the given application in the zone of the given job, if any. */
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
    // NOTE(review): zone(...).get() assumes every job type passed here maps to a zone in this system — TODO confirm.
    return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
/** Computes the target (and, when deployed, current source) platform and application versions for the given change. */
private State targetFor(Application application, Change change, Optional<Deployment> deployment) {
    return new State(targetPlatform(application, change, deployment),
                     targetApplication(application, change, deployment),
                     deployment.map(Deployment::version),
                     deployment.map(Deployment::applicationVersion));
}
/** A concrete triggering of a job, carrying the information needed to dispatch it to the build service. */
private static class Job extends BuildJob {

    private final JobType jobType;
    private final String reason;                        // human readable cause of the triggering
    private final Instant availableSince;               // when this job became available — used for ordering
    private final Collection<JobType> concurrentlyWith; // production jobs which may keep running while this is triggered
    private final boolean isRetry;
    private final boolean isApplicationUpgrade;
    private final State target;                         // the platform and application versions to deploy

    private Job(Application application, State target, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry, boolean isApplicationUpgrade) {
        super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
        this.jobType = jobType;
        this.availableSince = availableSince;
        this.concurrentlyWith = concurrentlyWith;
        this.reason = reason;
        this.isRetry = isRetry;
        this.isApplicationUpgrade = isApplicationUpgrade;
        this.target = target;
    }

    JobType jobType() { return jobType; }
    Instant availableSince() { return availableSince; }
    boolean isRetry() { return isRetry; }
    boolean applicationUpgrade() { return isApplicationUpgrade; }

}
/** An immutable tuple of the target — and, when known, source — platform and application versions of a deployment. */
private static class State {

    private final Version targetPlatform;
    private final ApplicationVersion targetApplication;
    private final Optional<Version> sourcePlatform;
    private final Optional<ApplicationVersion> sourceApplication;

    public State(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
        this.targetPlatform = targetPlatform;
        this.targetApplication = targetApplication;
        this.sourcePlatform = sourcePlatform;
        this.sourceApplication = sourceApplication;
    }

    @Override
    public String toString() {
        return String.format("platform %s %s, application %s %s",
                             targetPlatform,
                             sourcePlatform.map(v -> "(from " + v + ")").orElse(""),
                             targetApplication.id(),
                             sourceApplication.map(v -> "(from " + v.id() + ")").orElse(""));
    }

}
} |
Idea: How about we try to track the size the diff would have had and print that here? Now we have no idea how much larger we need to raise MAX_LENGTH to be able to see the diffs. | public boolean edit(Consumer<String> logConsumer) {
List<String> lines = supplier.get();
List<String> newLines = new LinkedList<>();
StringBuilder diff = new StringBuilder();
boolean modified = false;
for (String line : lines) {
LineEdit edit = editor.edit(line);
if (!edit.prependLines().isEmpty()) {
modified = true;
maybeAdd(diff, edit.prependLines());
newLines.addAll(edit.prependLines());
}
switch (edit.getType()) {
case REPLACE:
modified = true;
maybeRemove(diff, line);
break;
case NONE:
newLines.add(line);
break;
default:
throw new IllegalArgumentException("Unknown EditType " + edit.getType());
}
if (!edit.appendLines().isEmpty()) {
modified = true;
maybeAdd(diff, edit.appendLines());
newLines.addAll(edit.appendLines());
}
}
List<String> linesToAppend = editor.onComplete();
if (!linesToAppend.isEmpty()) {
modified = true;
newLines.addAll(linesToAppend);
maybeAdd(diff, linesToAppend);
}
if (!modified) {
return false;
}
String diffDescription = diffTooLarge(diff) ? ": Diff too large" : ":\n" + diff.toString();
logConsumer.accept("Patching file " + name + diffDescription);
consumer.accept(newLines);
return true;
} | String diffDescription = diffTooLarge(diff) ? ": Diff too large" : ":\n" + diff.toString(); | public boolean edit(Consumer<String> logConsumer) {
List<String> lines = supplier.get();
List<String> newLines = new LinkedList<>();
StringBuilder diff = new StringBuilder();
boolean modified = false;
for (String line : lines) {
LineEdit edit = editor.edit(line);
if (!edit.prependLines().isEmpty()) {
modified = true;
maybeAdd(diff, edit.prependLines());
newLines.addAll(edit.prependLines());
}
switch (edit.getType()) {
case REPLACE:
modified = true;
maybeRemove(diff, line);
break;
case NONE:
newLines.add(line);
break;
default:
throw new IllegalArgumentException("Unknown EditType " + edit.getType());
}
if (!edit.appendLines().isEmpty()) {
modified = true;
maybeAdd(diff, edit.appendLines());
newLines.addAll(edit.appendLines());
}
}
List<String> linesToAppend = editor.onComplete();
if (!linesToAppend.isEmpty()) {
modified = true;
newLines.addAll(linesToAppend);
maybeAdd(diff, linesToAppend);
}
if (!modified) {
return false;
}
String diffDescription = diffTooLarge() ? ": Diff too large (" + diffSize + ")" : ":\n" + diff.toString();
logConsumer.accept("Patching file " + name + diffDescription);
consumer.accept(newLines);
return true;
} | class Editor {
private static final Logger logger = Logger.getLogger(Editor.class.getName());
private static final Charset ENCODING = StandardCharsets.UTF_8;
// Max number of diff characters to include in the log message.
private static final int MAX_LENGTH = 300;

private final Supplier<List<String>> supplier;  // produces the lines to edit
private final Consumer<List<String>> consumer;  // persists the edited lines
private final String name;                      // what is being edited — used in logging
private final LineEditor editor;
/**
 * Creates an editor over the file at the given path. The file must be encoded in UTF-8;
 * it is read, edited with the LineEditor, and, if any modifications were made, written back.
 */
public Editor(Path path, LineEditor editor) {
    this(path.toString(),
         () -> uncheck(() -> Files.readAllLines(path, ENCODING)),
         (newLines) -> uncheck(() -> Files.write(path, newLines, ENCODING)),
         editor);
}
/**
 * @param name The name of what is being edited - used in logging
 * @param supplier Supplies the editor with a list of lines to edit
 * @param consumer Consumes the lines to persist if any change is detected
 * @param editor The line operations to execute on the lines supplied
 */
public Editor(String name,
              Supplier<List<String>> supplier,
              Consumer<List<String>> consumer,
              LineEditor editor) {
    this.supplier = supplier;
    this.consumer = consumer;
    this.name = name;
    this.editor = editor;
}
/** Runs the edit, recording any modification on the given task context, and returns whether anything changed. */
public boolean converge(TaskContext context) {
    return this.edit(line -> context.recordSystemModification(logger, line));
}
/** Appends each added line to the diff, as long as the diff has not grown past the cap. */
private static void maybeAdd(StringBuilder diff, List<String> lines) {
    for (String line : lines) {
        if (diffTooLarge(diff)) break; // the diff only grows, so no later line can fit either
        diff.append('+').append(line).append('\n');
    }
}
/** Appends the removed line to the diff, unless the diff has already grown past the cap. */
private static void maybeRemove(StringBuilder diff, String line) {
    if (diffTooLarge(diff)) return;
    diff.append('-').append(line).append('\n');
}
/** Returns whether the accumulated diff has exceeded the logging cap. */
private static boolean diffTooLarge(StringBuilder diff) {
    return diff.length() > MAX_LENGTH;
}
} | class Editor {
private static final Logger logger = Logger.getLogger(Editor.class.getName());
private static final Charset ENCODING = StandardCharsets.UTF_8;
// Max number of diff characters to include in the log message; beyond this only the total size is reported.
private static final int MAX_LENGTH = 500;

private final Supplier<List<String>> supplier;  // produces the lines to edit
private final Consumer<List<String>> consumer;  // persists the edited lines
private final String name;                      // what is being edited — used in logging
private final LineEditor editor;
// Running count of the size the full diff would have had, also past MAX_LENGTH.
private int diffSize = 0;
/**
 * Creates an editor over the file at the given path. The file must be encoded in UTF-8;
 * it is read, edited with the LineEditor, and, if any modifications were made, written back.
 */
public Editor(Path path, LineEditor editor) {
    this(path.toString(),
         () -> uncheck(() -> Files.readAllLines(path, ENCODING)),
         (newLines) -> uncheck(() -> Files.write(path, newLines, ENCODING)),
         editor);
}
/**
 * @param name The name of what is being edited - used in logging
 * @param supplier Supplies the editor with a list of lines to edit
 * @param consumer Consumes the lines to persist if any change is detected
 * @param editor The line operations to execute on the lines supplied
 */
public Editor(String name,
              Supplier<List<String>> supplier,
              Consumer<List<String>> consumer,
              LineEditor editor) {
    this.supplier = supplier;
    this.consumer = consumer;
    this.name = name;
    this.editor = editor;
}
/** Runs the edit, recording any modification on the given task context, and returns whether anything changed. */
public boolean converge(TaskContext context) {
    return this.edit(line -> context.recordSystemModification(logger, line));
}
/** Accounts each added line in diffSize and appends it to the diff while under the cap. */
private void maybeAdd(StringBuilder diff, List<String> lines) {
    for (String line : lines) {
        // 2 extra chars: the '+' marker and the trailing newline.
        diffSize += 2 + line.length();
        if (!diffTooLarge()) {
            diff.append('+').append(line).append('\n');
        }
    }
}
/** Accounts the removed line in diffSize and appends it to the diff if still under the cap. */
private void maybeRemove(StringBuilder diff, String line) {
    // 2 extra chars: the '-' marker and the trailing newline.
    diffSize += 2 + line.length();
    if (!diffTooLarge()) {
        diff.append('-').append(line).append('\n');
    }
}
/** Returns whether the full would-be diff (tracked in diffSize) exceeds the logging cap. */
private boolean diffTooLarge() {
    return diffSize > MAX_LENGTH;
}
} |
Because the "names" of non-configured flavors cannot be parsed back to flavors. Here we should just remove the flavor part once we have no bare metal left (contrary to the node repo which stores both hosts and nodes). | private void toSlime(Flavor flavor, Cursor object) {
if (flavor.isConfigured()) {
object.setString(flavorKey, flavor.name());
}
else {
NodeResources resources = flavor.resources();
Cursor resourcesObject = object.setObject(resourcesKey);
resourcesObject.setDouble(vcpuKey, resources.vcpu());
resourcesObject.setDouble(memoryKey, resources.memoryGb());
resourcesObject.setDouble(diskKey, resources.diskGb());
}
} | object.setString(flavorKey, flavor.name()); | private void toSlime(Flavor flavor, Cursor object) {
if (flavor.isConfigured()) {
object.setString(flavorKey, flavor.name());
}
else {
NodeResources resources = flavor.resources();
Cursor resourcesObject = object.setObject(resourcesKey);
resourcesObject.setDouble(vcpuKey, resources.vcpu());
resourcesObject.setDouble(memoryKey, resources.memoryGb());
resourcesObject.setDouble(diskKey, resources.diskGb());
}
} | class AllocatedHosts {
// Keys of the slime serialization format produced and consumed below.
private static final String mappingKey = "mapping";
private static final String hostSpecKey = "hostSpec";
private static final String hostSpecHostNameKey = "hostName";
private static final String aliasesKey = "aliases";
private static final String hostSpecMembershipKey = "membership";
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String vcpuKey = "vcpu";
private static final String memoryKey = "memory";
private static final String diskKey = "disk";

/** Wanted version */
private static final String hostSpecVespaVersionKey = "vespaVersion";

/** Current version */
private static final String hostSpecCurrentVespaVersionKey = "currentVespaVersion";

private static final String hostSpecNetworkPortsKey = "ports";

private final ImmutableSet<HostSpec> hosts;
/** Takes an immutable snapshot of the given hosts; use {@link #withHosts} to create instances. */
AllocatedHosts(Set<HostSpec> hosts) {
    this.hosts = ImmutableSet.copyOf(hosts);
}
/** Creates an AllocatedHosts over an immutable snapshot of the given host specs. */
public static AllocatedHosts withHosts(Set<HostSpec> hosts) {
    return new AllocatedHosts(hosts);
}
/** Writes all hosts as an array under mappingKey, one hostSpec object per host. */
private void toSlime(Cursor cursor) {
    Cursor array = cursor.setArray(mappingKey);
    for (HostSpec host : hosts)
        toSlime(host, array.addObject().setObject(hostSpecKey));
}
/** Serializes a single host spec; optional parts are simply omitted when absent. */
private void toSlime(HostSpec host, Cursor cursor) {
    cursor.setString(hostSpecHostNameKey, host.hostname());
    aliasesToSlime(host, cursor);
    host.membership().ifPresent(membership -> {
        cursor.setString(hostSpecMembershipKey, membership.stringValue());
        cursor.setString(hostSpecVespaVersionKey, membership.cluster().vespaVersion().toFullString());
    });
    host.flavor().ifPresent(flavor -> toSlime(flavor, cursor));
    host.version().ifPresent(version -> cursor.setString(hostSpecCurrentVespaVersionKey, version.toFullString()));
    host.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, cursor.setArray(hostSpecNetworkPortsKey)));
}
/** Writes the alias array, omitting the field entirely when there are no aliases. */
private void aliasesToSlime(HostSpec spec, Cursor cursor) {
    if (spec.aliases().isEmpty()) return;
    Cursor aliases = cursor.setArray(aliasesKey);
    for (String alias : spec.aliases())
        aliases.addString(alias);
}
/** Returns the immutable set of hosts of this allocation. */
public Set<HostSpec> getHosts() { return hosts; }
/** Reads an AllocatedHosts from the given slime root, resolving flavors against the given node flavors when present. */
private static AllocatedHosts fromSlime(Inspector inspector, Optional<NodeFlavors> nodeFlavors) {
    Inspector array = inspector.field(mappingKey);
    Set<HostSpec> hosts = new LinkedHashSet<>(); // preserves serialization order
    array.traverse((ArrayTraverser)(i, host) -> hosts.add(hostFromSlime(host.field(hostSpecKey), nodeFlavors)));
    return new AllocatedHosts(hosts);
}
/** Reads a single host spec; every part except the host name is optional. */
static HostSpec hostFromSlime(Inspector object, Optional<NodeFlavors> nodeFlavors) {
    Optional<ClusterMembership> membership =
            object.field(hostSpecMembershipKey).valid() ? Optional.of(membershipFromSlime(object)) : Optional.empty();
    Optional<Flavor> flavor = flavorFromSlime(object, nodeFlavors);
    Optional<com.yahoo.component.Version> version =
            optionalString(object.field(hostSpecCurrentVespaVersionKey)).map(com.yahoo.component.Version::new);
    Optional<NetworkPorts> networkPorts =
            NetworkPortsSerializer.fromSlime(object.field(hostSpecNetworkPortsKey));

    return new HostSpec(object.field(hostSpecHostNameKey).asString(), aliasesFromSlime(object), flavor, membership, version, networkPorts);
}
/** Reads the alias array, returning an empty list when the field is absent. */
private static List<String> aliasesFromSlime(Inspector object) {
    if ( ! object.field(aliasesKey).valid()) return Collections.emptyList();
    List<String> aliases = new ArrayList<>();
    object.field(aliasesKey).traverse((ArrayTraverser)(index, alias) -> aliases.add(alias.asString()));
    return aliases;
}
/** Reads a flavor: a flavor name known to the given node flavors wins; otherwise explicit resources; otherwise empty. */
private static Optional<Flavor> flavorFromSlime(Inspector object, Optional<NodeFlavors> nodeFlavors) {
    if (object.field(flavorKey).valid() && nodeFlavors.isPresent() && nodeFlavors.get().exists(object.field(flavorKey).asString())) {
        return nodeFlavors.get().getFlavor(object.field(flavorKey).asString());
    }
    else if (object.field(resourcesKey).valid()) {
        Inspector resources = object.field(resourcesKey);
        return Optional.of(new Flavor(new NodeResources(resources.field(vcpuKey).asDouble(),
                                                        resources.field(memoryKey).asDouble(),
                                                        resources.field(diskKey).asDouble())));
    }
    else {
        return Optional.empty();
    }
}
/** Reads the cluster membership together with its wanted Vespa version. */
private static ClusterMembership membershipFromSlime(Inspector object) {
    return ClusterMembership.from(object.field(hostSpecMembershipKey).asString(),
                                  com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersionKey).asString()));
}
/** Returns the string value of the given inspector, or empty when the field is invalid or absent. */
private static Optional<String> optionalString(Inspector inspector) {
    return inspector.valid() ? Optional.of(inspector.asString()) : Optional.empty();
}
/** Serializes this to JSON bytes. */
public byte[] toJson() throws IOException {
    Slime slime = new Slime();
    toSlime(slime.setObject());
    return SlimeUtils.toJsonBytes(slime);
}
/** Deserializes an AllocatedHosts from JSON produced by {@link #toJson()}. */
public static AllocatedHosts fromJson(byte[] json, Optional<NodeFlavors> nodeFlavors) {
    return fromSlime(SlimeUtils.jsonToSlime(json).get(), nodeFlavors);
}
/** Two allocations are equal when they contain the same host specs. */
@Override
public boolean equals(Object other) {
    if (this == other) return true;
    if ( ! (other instanceof AllocatedHosts)) return false;
    AllocatedHosts that = (AllocatedHosts) other;
    return hosts.equals(that.hosts);
}
@Override
public int hashCode() {
    return hosts.hashCode(); // consistent with equals, which compares only hosts
}
/** Returns the string representation of the host set. */
@Override
public String toString() {
    return hosts.toString();
}
} | class AllocatedHosts {
// Keys of the slime serialization format produced and consumed below.
private static final String mappingKey = "mapping";
private static final String hostSpecKey = "hostSpec";
private static final String hostSpecHostNameKey = "hostName";
private static final String aliasesKey = "aliases";
private static final String hostSpecMembershipKey = "membership";
private static final String flavorKey = "flavor";
private static final String resourcesKey = "resources";
private static final String vcpuKey = "vcpu";
private static final String memoryKey = "memory";
private static final String diskKey = "disk";

/** Wanted version */
private static final String hostSpecVespaVersionKey = "vespaVersion";

/** Current version */
private static final String hostSpecCurrentVespaVersionKey = "currentVespaVersion";

private static final String hostSpecNetworkPortsKey = "ports";

private final ImmutableSet<HostSpec> hosts;
/** Takes an immutable snapshot of the given hosts; use {@link #withHosts} to create instances. */
AllocatedHosts(Set<HostSpec> hosts) {
    this.hosts = ImmutableSet.copyOf(hosts);
}
/** Creates an AllocatedHosts over an immutable snapshot of the given host specs. */
public static AllocatedHosts withHosts(Set<HostSpec> hosts) {
    return new AllocatedHosts(hosts);
}
/** Writes all hosts as an array under mappingKey, one hostSpec object per host. */
private void toSlime(Cursor cursor) {
    Cursor array = cursor.setArray(mappingKey);
    for (HostSpec host : hosts)
        toSlime(host, array.addObject().setObject(hostSpecKey));
}
/** Serializes a single host spec; optional parts are simply omitted when absent. */
private void toSlime(HostSpec host, Cursor cursor) {
    cursor.setString(hostSpecHostNameKey, host.hostname());
    aliasesToSlime(host, cursor);
    host.membership().ifPresent(membership -> {
        cursor.setString(hostSpecMembershipKey, membership.stringValue());
        cursor.setString(hostSpecVespaVersionKey, membership.cluster().vespaVersion().toFullString());
    });
    host.flavor().ifPresent(flavor -> toSlime(flavor, cursor));
    host.version().ifPresent(version -> cursor.setString(hostSpecCurrentVespaVersionKey, version.toFullString()));
    host.networkPorts().ifPresent(ports -> NetworkPortsSerializer.toSlime(ports, cursor.setArray(hostSpecNetworkPortsKey)));
}
/** Writes the alias array, omitting the field entirely when there are no aliases. */
private void aliasesToSlime(HostSpec spec, Cursor cursor) {
    if (spec.aliases().isEmpty()) return;
    Cursor aliases = cursor.setArray(aliasesKey);
    for (String alias : spec.aliases())
        aliases.addString(alias);
}
/** Returns the immutable set of hosts of this allocation. */
public Set<HostSpec> getHosts() { return hosts; }
/** Reads an AllocatedHosts from the given slime root, resolving flavors against the given node flavors when present. */
private static AllocatedHosts fromSlime(Inspector inspector, Optional<NodeFlavors> nodeFlavors) {
    Inspector array = inspector.field(mappingKey);
    Set<HostSpec> hosts = new LinkedHashSet<>(); // preserves serialization order
    array.traverse((ArrayTraverser)(i, host) -> hosts.add(hostFromSlime(host.field(hostSpecKey), nodeFlavors)));
    return new AllocatedHosts(hosts);
}
/**
 * Deserializes a single host spec.
 *
 * @param nodeFlavors used to resolve a named flavor, when known in this zone
 */
static HostSpec hostFromSlime(Inspector object, Optional<NodeFlavors> nodeFlavors) {
Optional<ClusterMembership> membership =
object.field(hostSpecMembershipKey).valid() ? Optional.of(membershipFromSlime(object)) : Optional.empty();
Optional<Flavor> flavor = flavorFromSlime(object, nodeFlavors);
Optional<com.yahoo.component.Version> version =
optionalString(object.field(hostSpecCurrentVespaVersionKey)).map(com.yahoo.component.Version::new);
Optional<NetworkPorts> networkPorts =
NetworkPortsSerializer.fromSlime(object.field(hostSpecNetworkPortsKey));
return new HostSpec(object.field(hostSpecHostNameKey).asString(), aliasesFromSlime(object), flavor, membership, version, networkPorts);
}
/** Reads the alias list, returning an empty list when the field is absent. */
private static List<String> aliasesFromSlime(Inspector object) {
    Inspector aliasField = object.field(aliasesKey);
    if ( ! aliasField.valid()) return Collections.emptyList();
    List<String> aliases = new ArrayList<>();
    aliasField.traverse((ArrayTraverser) (index, alias) -> aliases.add(alias.asString()));
    return aliases;
}
/**
 * Resolves the host flavor: a named flavor known to this zone's NodeFlavors wins,
 * explicit resources are the fallback, otherwise empty.
 */
private static Optional<Flavor> flavorFromSlime(Inspector object, Optional<NodeFlavors> nodeFlavors) {
    Inspector flavorField = object.field(flavorKey);
    if (flavorField.valid() && nodeFlavors.isPresent() && nodeFlavors.get().exists(flavorField.asString()))
        return nodeFlavors.get().getFlavor(flavorField.asString());
    Inspector resources = object.field(resourcesKey);
    if (resources.valid())
        return Optional.of(new Flavor(new NodeResources(resources.field(vcpuKey).asDouble(),
                                                        resources.field(memoryKey).asDouble(),
                                                        resources.field(diskKey).asDouble())));
    return Optional.empty();
}
/** Reads cluster membership together with the wanted (not current) Vespa version. */
private static ClusterMembership membershipFromSlime(Inspector object) {
return ClusterMembership.from(object.field(hostSpecMembershipKey).asString(),
com.yahoo.component.Version.fromString(object.field(hostSpecVespaVersionKey).asString()));
}
/** Returns the inspector's string value, or empty if the field is missing or invalid. */
private static Optional<String> optionalString(Inspector inspector) {
    return inspector.valid() ? Optional.of(inspector.asString()) : Optional.empty();
}
/** Serializes this allocation to its JSON wire format. */
public byte[] toJson() throws IOException {
Slime slime = new Slime();
toSlime(slime.setObject());
return SlimeUtils.toJsonBytes(slime);
}
/** Deserializes an allocation from JSON produced by {@link #toJson()}. */
public static AllocatedHosts fromJson(byte[] json, Optional<NodeFlavors> nodeFlavors) {
return fromSlime(SlimeUtils.jsonToSlime(json).get(), nodeFlavors);
}
/** Two allocations are equal when they hold equal host sets. */
@Override
public boolean equals(Object other) {
    // instanceof rejects null, and a self-comparison trivially passes the hosts check.
    if ( ! (other instanceof AllocatedHosts)) return false;
    return hosts.equals(((AllocatedHosts) other).hosts);
}
@Override
public int hashCode() {
// Consistent with equals: both delegate to the hosts set.
return hosts.hashCode();
}
@Override
public String toString() {
return hosts.toString();
}
} |
Sure, now logs ``` NodeAgent-host002-10: Patching file ip6tables-filter: Diff too large (518) ``` | public boolean edit(Consumer<String> logConsumer) {
// Applies the LineEditor to every supplied line, accumulating a bounded textual diff.
// Returns false (writing nothing) when no line was modified; otherwise persists the
// edited lines through 'consumer', logs a patch summary, and returns true.
List<String> lines = supplier.get();
List<String> newLines = new LinkedList<>();
StringBuilder diff = new StringBuilder();
boolean modified = false;
for (String line : lines) {
LineEdit edit = editor.edit(line);
// Lines the edit wants inserted before the current line.
if (!edit.prependLines().isEmpty()) {
modified = true;
maybeAdd(diff, edit.prependLines());
newLines.addAll(edit.prependLines());
}
switch (edit.getType()) {
case REPLACE:
// REPLACE drops the original line; replacements arrive via the prepend/append lists.
modified = true;
maybeRemove(diff, line);
break;
case NONE:
newLines.add(line);
break;
default:
throw new IllegalArgumentException("Unknown EditType " + edit.getType());
}
if (!edit.appendLines().isEmpty()) {
modified = true;
maybeAdd(diff, edit.appendLines());
newLines.addAll(edit.appendLines());
}
}
// The editor may emit trailing lines once the whole input has been seen.
List<String> linesToAppend = editor.onComplete();
if (!linesToAppend.isEmpty()) {
modified = true;
newLines.addAll(linesToAppend);
maybeAdd(diff, linesToAppend);
}
if (!modified) {
return false;
}
// The diff is truncated by maybeAdd/maybeRemove once it exceeds MAX_LENGTH; in that
// case only a placeholder is logged (this version does not report the truncated size).
String diffDescription = diffTooLarge(diff) ? ": Diff too large" : ":\n" + diff.toString();
logConsumer.accept("Patching file " + name + diffDescription);
consumer.accept(newLines);
return true;
} | String diffDescription = diffTooLarge(diff) ? ": Diff too large" : ":\n" + diff.toString(); | public boolean edit(Consumer<String> logConsumer) {
// Applies the LineEditor to every supplied line, accumulating a size-bounded diff.
// (The method signature is on the preceding, column-fused line of this dump.)
List<String> lines = supplier.get();
List<String> newLines = new LinkedList<>();
StringBuilder diff = new StringBuilder();
boolean modified = false;
for (String line : lines) {
LineEdit edit = editor.edit(line);
// Lines the edit wants inserted before the current line.
if (!edit.prependLines().isEmpty()) {
modified = true;
maybeAdd(diff, edit.prependLines());
newLines.addAll(edit.prependLines());
}
switch (edit.getType()) {
case REPLACE:
// REPLACE drops the original line; replacements arrive via the prepend/append lists.
modified = true;
maybeRemove(diff, line);
break;
case NONE:
newLines.add(line);
break;
default:
throw new IllegalArgumentException("Unknown EditType " + edit.getType());
}
if (!edit.appendLines().isEmpty()) {
modified = true;
maybeAdd(diff, edit.appendLines());
newLines.addAll(edit.appendLines());
}
}
// The editor may emit trailing lines once the whole input has been seen.
List<String> linesToAppend = editor.onComplete();
if (!linesToAppend.isEmpty()) {
modified = true;
newLines.addAll(linesToAppend);
maybeAdd(diff, linesToAppend);
}
if (!modified) {
return false;
}
// Unlike the earlier version, a truncated diff now reports its total size (diffSize).
String diffDescription = diffTooLarge() ? ": Diff too large (" + diffSize + ")" : ":\n" + diff.toString();
logConsumer.accept("Patching file " + name + diffDescription);
consumer.accept(newLines);
return true;
} | class Editor {
private static final Logger logger = Logger.getLogger(Editor.class.getName());
private static final Charset ENCODING = StandardCharsets.UTF_8;
// Cap on the size of the diff text included in log messages.
private static final int MAX_LENGTH = 300;
private final Supplier<List<String>> supplier;
private final Consumer<List<String>> consumer;
private final String name;
private final LineEditor editor;
/**
 * Read the file, which must be encoded in UTF-8, use the LineEditor to edit it,
 * and if any modifications were done, write it back and return true.
 */
public Editor(Path path, LineEditor editor) {
this(path.toString(),
() -> uncheck(() -> Files.readAllLines(path, ENCODING)),
(newLines) -> uncheck(() -> Files.write(path, newLines, ENCODING)),
editor);
}
/**
 * @param name The name of what is being edited - used in logging
 * @param supplier Supplies the editor with a list of lines to edit
 * @param consumer Consumes the lines to persist if any changes are detected
 * @param editor The line operations to execute on the lines supplied
 */
public Editor(String name,
Supplier<List<String>> supplier,
Consumer<List<String>> consumer,
LineEditor editor) {
this.supplier = supplier;
this.consumer = consumer;
this.name = name;
this.editor = editor;
}
/** Runs the edit, recording each log line as a system modification on the task context. */
public boolean converge(TaskContext context) {
return this.edit(line -> context.recordSystemModification(logger, line));
}
/** Appends each added line to the diff with a '+' prefix, until the diff exceeds its size cap. */
private static void maybeAdd(StringBuilder diff, List<String> lines) {
    for (String line : lines) {
        // Re-check per line: the diff grows inside this loop.
        if (diffTooLarge(diff)) continue;
        diff.append('+').append(line).append('\n');
    }
}
/** Appends the removed line to the diff with a '-' prefix, unless the diff already exceeds its size cap. */
private static void maybeRemove(StringBuilder diff, String line) {
    if (diffTooLarge(diff)) return;
    diff.append('-').append(line).append('\n');
}
/** Whether the accumulated diff text has exceeded the logging size cap. */
private static boolean diffTooLarge(StringBuilder diff) {
return diff.length() > MAX_LENGTH;
}
} | class Editor {
private static final Logger logger = Logger.getLogger(Editor.class.getName());
private static final Charset ENCODING = StandardCharsets.UTF_8;
// Cap on the size of the diff text included in log messages.
private static final int MAX_LENGTH = 500;
private final Supplier<List<String>> supplier;
private final Consumer<List<String>> consumer;
private final String name;
private final LineEditor editor;
// Total size of the diff ('+'/'-' prefix + line + newline per edit), counted even
// after the textual diff has been truncated, so the full size can be reported in logs.
private int diffSize = 0;
/**
 * Read the file, which must be encoded in UTF-8, use the LineEditor to edit it,
 * and if any modifications were done, write it back and return true.
 */
public Editor(Path path, LineEditor editor) {
this(path.toString(),
() -> uncheck(() -> Files.readAllLines(path, ENCODING)),
(newLines) -> uncheck(() -> Files.write(path, newLines, ENCODING)),
editor);
}
/**
 * @param name The name of what is being edited - used in logging
 * @param supplier Supplies the editor with a list of lines to edit
 * @param consumer Consumes the lines to persist if any changes are detected
 * @param editor The line operations to execute on the lines supplied
 */
public Editor(String name,
Supplier<List<String>> supplier,
Consumer<List<String>> consumer,
LineEditor editor) {
this.supplier = supplier;
this.consumer = consumer;
this.name = name;
this.editor = editor;
}
/** Runs the edit, recording each log line as a system modification on the task context. */
public boolean converge(TaskContext context) {
return this.edit(line -> context.recordSystemModification(logger, line));
}
/** Records added lines in the diff ('+' prefix); counts their size even when the text is truncated. */
private void maybeAdd(StringBuilder diff, List<String> lines) {
for (String line : lines) {
// 2 accounts for the '+' prefix and the trailing newline.
diffSize += 2 + line.length();
if (!diffTooLarge()) {
diff.append('+').append(line).append('\n');
}
}
}
/** Records a removed line in the diff ('-' prefix); counts its size even when the text is truncated. */
private void maybeRemove(StringBuilder diff, String line) {
diffSize += 2 + line.length();
if (!diffTooLarge()) {
diff.append('-').append(line).append('\n');
}
}
/** Whether the total diff size has exceeded the cap for inclusion in logs. */
private boolean diffTooLarge() {
return diffSize > MAX_LENGTH;
}
} |
I still find this highly useful :p | public void notifyOfCompletion(JobReport report) {
// Records the outcome of a completed job run on the owning application.
// Component (build) jobs may set a new application version in motion; any
// other job type must already have a recorded triggering.
log.log(LogLevel.INFO, String.format("Got notified of %s for %s of %s (%d).",
report.jobError().map(JobError::toString).orElse("success"),
report.jobType(),
report.applicationId(),
report.projectId()));
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
JobRun triggering;
if (report.jobType() == component) {
// NOTE(review): assumes a component report always carries a source revision — confirm before relying on it.
ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, empty(), empty(), "Application commit", clock.instant());
if (report.success()) {
// Either roll the new version into the current change, or park it as outstanding.
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion));
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
}
else triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered)
.orElseThrow(() -> new IllegalStateException("Got notified about completion of " + report.jobType().jobName() + " for " +
report.applicationId() + ", but that has neither been triggered nor deployed"));
applications().store(application.withJobCompletion(report.projectId(),
report.jobType(),
triggering.completion(report.buildNumber(), clock.instant()),
report.jobError()));
});
} | log.log(LogLevel.INFO, String.format("Got notified of %s for %s of %s (%d).", | public void notifyOfCompletion(JobReport report) {
// Records the outcome of a completed job run on the owning application.
// (The method signature is on the preceding, column-fused line of this dump.)
log.log(LogLevel.INFO, String.format("Got notified of %s for %s of %s (%d).",
report.jobError().map(JobError::toString).orElse("success"),
report.jobType(),
report.applicationId(),
report.projectId()));
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
JobRun triggering;
if (report.jobType() == component) {
// NOTE(review): assumes a component report always carries a source revision — confirm before relying on it.
ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, empty(), empty(), "Application commit", clock.instant());
if (report.success()) {
// Either roll the new version into the current change, or park it as outstanding.
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion));
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
}
else triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered)
.orElseThrow(() -> new IllegalStateException("Got notified about completion of " + report.jobType().jobName() + " for " +
report.applicationId() + ", but that has neither been triggered nor deployed"));
applications().store(application.withJobCompletion(report.projectId(),
report.jobType(),
triggering.completion(report.buildNumber(), clock.instant()),
report.jobError()));
});
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
// Maps deployment-spec zones to their corresponding jobs.
private final DeploymentOrder order;
private final BuildService buildService;
/**
 * Creates a deployment trigger.
 *
 * @throws NullPointerException if any argument is null
 */
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
    // Not stored, but still part of the construction contract.
    Objects.requireNonNull(curator, "curator cannot be null");
    this.controller = Objects.requireNonNull(controller, "controller cannot be null");
    this.clock = Objects.requireNonNull(clock, "clock cannot be null");
    // Was previously unchecked, unlike the other arguments; it is dereferenced by trigger() and isRunning().
    this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null");
    this.order = new DeploymentOrder(controller::system);
}
/** Returns the deployment order used to map zones to jobs. */
public DeploymentOrder deploymentOrder() {
return order;
}
// NOTE(review): dangling javadoc — the completion-handling method it documented is not adjacent here.
/**
 * Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
 */
/**
 * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
 *
 * Only one job is triggered each run for test jobs, since their environments have limited capacity.
 */
public long triggerReadyJobs() {
// Partition ready jobs: test jobs (key true) vs. production jobs (key false).
return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
.entrySet().stream()
.flatMap(entry -> (entry.getKey()
// Test jobs: retries and application upgrades first, then oldest first, grouped per job type.
? entry.getValue().stream()
.sorted(comparing(Job::isRetry)
.thenComparing(Job::applicationUpgrade)
.reversed()
.thenComparing(Job::availableSince))
.collect(groupingBy(Job::jobType))
// Production jobs: grouped per application.
: entry.getValue().stream()
.collect(groupingBy(Job::applicationId)))
.values().stream()
// At most one test job per type is triggered; production jobs have no such limit.
.map(jobs -> (Supplier<Long>) jobs.stream()
.filter(job -> canTrigger(job) && trigger(job))
.limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
.parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
 * Attempts to trigger the given job for the given application and returns the outcome.
 *
 * If the build service can not find the given job, or claims it is illegal to trigger it,
 * the project id is removed from the application owning the job, to prevent further trigger attempts.
 */
public boolean trigger(Job job) {
log.log(LogLevel.INFO, String.format("Attempting to trigger %s: %s (%s)", job, job.reason, job.target));
try {
buildService.trigger(job);
// Record the triggering on the application so its job status reflects the pending run.
applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(
job.jobType, JobRun.triggering(job.target.targetPlatform, job.target.targetApplication, job.target.sourcePlatform, job.target.sourceApplication, job.reason, clock.instant()))));
return true;
}
catch (RuntimeException e) {
log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
// Unknown or illegal job: detach the project id so we stop re-attempting it.
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
applications().lockOrThrow(job.applicationId(), application ->
applications().store(application.withProjectId(OptionalLong.empty())));
return false;
}
}
/**
 * Triggers a change of this application
 *
 * @param applicationId the application to trigger
 * @throws IllegalArgumentException if this application already has an ongoing change
 */
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
// An existing change may only be replaced while it is failing.
if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
// A deliberate application change supersedes any parked outstanding change.
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.map(Change::of)
.filter(change -> keepApplicationChange)
.orElse(Change.empty())));
});
}
/** Returns the currently ready jobs, grouped by job type. */
public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
return computeReadyJobs().collect(groupingBy(Job::jobType));
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps. */
private Stream<Job> computeReadyJobs() {
return ApplicationList.from(applications().asList())
.notPullRequest()
.withProjectId()
.deploying()
.idList().stream()
.map(this::computeReadyJobs)
.flatMap(List::stream);
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
public boolean isRunning(Application application, JobType jobType) {
// A completion newer than the last triggering means not running; otherwise defer to the build service.
return ! application.deploymentJobs().statusOf(jobType)
.flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))).orElse(false)
&& buildService.isRunning(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
}
/** Forcibly triggers the given job — or, if it is untested, its prerequisite test jobs — and returns what was triggered. */
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType) {
Application application = applications().require(applicationId);
if (jobType == component) {
buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
return singletonList(component);
}
State target = targetFor(application, application.change(), deploymentFor(application, jobType));
String reason = ">:o:< Triggered by force! (-o-) |-o-| (=oo=)";
if (isTested(application, target, jobType)) {
trigger(deploymentJob(application, target, application.change(), jobType, reason, clock.instant(), Collections.emptySet()));
return singletonList(jobType);
}
// Not yet tested for this target: trigger the required test jobs instead.
List<Job> testJobs = testJobsFor(application, target, reason, clock.instant());
testJobs.forEach(this::trigger);
return testJobs.stream().map(Job::jobType).collect(toList());
}
/** Creates a deployment job, marking it as a retry when the last run failed with out-of-capacity. */
private Job deploymentJob(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
.filter(JobError.outOfCapacity::equals).isPresent();
if (isRetry) reason += "; retrying on out of capacity";
return new Job(application, target, change, jobType, reason, availableSince, concurrentlyWith, isRetry);
}
/** The platform to deploy: the max of what is deployed and what the change carries, defaulting to the system version. */
private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::version), change.platform())
.orElse(application.oldestDeployedPlatform()
.orElse(controller.systemVersion()));
}
/** The application version to deploy: the max of what is deployed and what the change carries, defaulting to the last built version. */
private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::applicationVersion), change.application())
.orElse(application.oldestDeployedApplication()
.orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application()));
}
/** Returns the greater of the two optionals; empty loses to present, and ties go to the first. */
private static <T extends Comparable<T>> Optional<T> max(Optional<T> first, Optional<T> second) {
    if ( ! first.isPresent()) return second;
    if ( ! second.isPresent()) return first;
    return first.get().compareTo(second.get()) >= 0 ? first : second;
}
/**
 * Finds the next step to trigger for the given application, if any, and returns these as a list.
 */
private List<Job> computeReadyJobs(ApplicationId id) {
List<Job> jobs = new ArrayList<>();
applications().get(id).ifPresent(application -> {
// An empty deployment spec implies a single test zone.
List<Step> steps = application.deploymentSpec().steps().isEmpty()
? singletonList(new DeploymentSpec.DeclaredZone(test))
: application.deploymentSpec().steps();
List<Step> productionSteps = steps.stream().filter(step -> step.deploysTo(prod) || step.zones().isEmpty()).collect(toList());
// Production is gated on the last staging-test success.
Optional<Instant> completedAt = application.deploymentJobs().statusOf(stagingTest)
.flatMap(JobStatus::lastSuccess).map(JobRun::at);
String reason = "New change available";
List<Job> testJobs = null;
for (Step step : productionSteps) {
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
// Group this step's jobs by when (if ever) the current change completed on them.
Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(application.change(), application, job)));
if (jobsByCompletion.containsKey(empty())) {
// Some jobs in this step are incomplete: trigger them if tested, otherwise queue the tests.
for (JobType job : jobsByCompletion.get(empty())) {
State target = targetFor(application, application.change(), deploymentFor(application, job));
if (isTested(application, target, job)) {
if (completedAt.isPresent())
jobs.add(deploymentJob(application, target, application.change(), job, reason, completedAt.get(), stepJobs));
}
else if (testJobs == null) {
if ( ! alreadyTriggered(application, target))
testJobs = testJobsFor(application, target, "Testing deployment for " + job.jobName(), completedAt.orElse(clock.instant()));
else
testJobs = emptyList();
}
}
}
else {
// Step fully complete (or a pure delay): advance the completion gate for the next step.
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
}
if (testJobs == null)
testJobs = testJobsFor(application, targetFor(application, application.change(), empty()), "Testing last changes outside prod", clock.instant());
jobs.addAll(testJobs);
// When every declared job has completed the change, clear it from the application.
if (steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob)
.allMatch(job -> completedAt(application.change(), application, job).isPresent()))
applications().lockIfPresent(id, lockedApplication -> applications().store(lockedApplication.withChange(Change.empty())));
});
return jobs;
}
/** Returns the test jobs (system/staging) still needed for the given target, advancing availableSince past completed ones. */
private List<Job> testJobsFor(Application application, State target, String reason, Instant availableSince) {
List<Step> steps = application.deploymentSpec().steps();
if (steps.isEmpty()) steps = singletonList(new DeploymentSpec.DeclaredZone(test));
List<Job> jobs = new ArrayList<>();
for (Step step : steps.stream().filter(step -> step.deploysTo(test) || step.deploysTo(staging)).collect(toList())) {
for (JobType jobType : step.zones().stream().map(order::toJob).collect(toList())) {
// Staging must also match the upgrade's source versions, not just its targets.
Optional<JobRun> completion = successOn(application, jobType, target)
.filter(run -> jobType != stagingTest || sourcesMatchIfPresent(target, run));
if (completion.isPresent())
availableSince = completion.get().at();
else if (isTested(application, target, jobType))
jobs.add(deploymentJob(application, target, application.change(), jobType, reason, availableSince, emptySet()));
}
}
return jobs;
}
/** Whether the prerequisite tests for running the given job type against the given target have passed. */
private boolean isTested(Application application, State target, JobType jobType) {
if (jobType.environment() == staging)
return successOn(application, systemTest, target).isPresent();
if (jobType.environment() == prod)
return successOn(application, stagingTest, target).filter(run -> sourcesMatchIfPresent(target, run)).isPresent()
|| alreadyTriggered(application, target);
// Test jobs themselves have no prerequisites.
return true;
}
/** If the given state's sources are present and differ from its targets, returns whether they are equal to those of the given job run. */
private static boolean sourcesMatchIfPresent(State target, JobRun jobRun) {
return ( ! target.sourcePlatform.filter(version -> ! version.equals(target.targetPlatform)).isPresent()
|| target.sourcePlatform.equals(jobRun.sourcePlatform()))
&& ( ! target.sourceApplication.filter(version -> ! version.equals(target.targetApplication)).isPresent()
|| target.sourceApplication.equals(jobRun.sourceApplication()));
}
/** Whether the run's platform and application versions both equal the target's. */
private static boolean targetsMatch(State target, JobRun jobRun) {
return target.targetPlatform.equals(jobRun.platform()) && target.targetApplication.equals(jobRun.application());
}
/** The latest instant at which both system and staging tests succeeded for the target, if both did. */
private Optional<Instant> testedAt(Application application, State target) {
return max(successOn(application, systemTest, target).map(JobRun::at),
successOn(application, stagingTest, target).filter(run -> sourcesMatchIfPresent(target, run)).map(JobRun::at));
}
/** Whether some production job has already been triggered for this exact target (and sources). */
private boolean alreadyTriggered(Application application, State target) {
return application.deploymentJobs().jobStatus().values().stream()
.filter(job -> job.type().isProduction())
.anyMatch(job -> job.lastTriggered()
.filter(run -> targetsMatch(target, run))
.filter(run -> sourcesMatchIfPresent(target, run))
.isPresent());
}
/**
 * Returns the instant when the given change is complete for the given application for the given job.
 *
 * Any job is complete if the given change is already successful on that job.
 * A production job is also considered complete if its current change is strictly dominated by what
 * is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
 * part is a downgrade, regardless of the status of the job.
 */
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
State target = targetFor(application, change, deploymentFor(application, jobType));
Optional<JobRun> lastSuccess = successOn(application, jobType, target);
if (lastSuccess.isPresent() || ! jobType.isProduction())
return lastSuccess.map(JobRun::at);
// No success recorded: complete anyway if the deployed versions strictly dominate the change.
return deploymentFor(application, jobType)
.filter(deployment -> ! ( change.upgrades(deployment.version())
|| change.upgrades(deployment.applicationVersion()))
&& ( change.downgrades(deployment.version())
|| change.downgrades(deployment.applicationVersion())))
.map(Deployment::at);
}
/** The last success of the given job type whose versions match the target, if any. */
private Optional<JobRun> successOn(Application application, JobType jobType, State target) {
return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
.filter(run -> targetsMatch(target, run));
}
/** Whether the given ready job may actually be triggered right now. */
private boolean canTrigger(Job job) {
Application application = applications().require(job.applicationId());
if (isRunning(application, job.jobType))
return false;
// Already succeeded for this exact target and sources: nothing to do.
if (successOn(application, job.jobType, job.target).filter(run -> sourcesMatchIfPresent(job.target, run)).isPresent())
return false;
if ( ! job.jobType.isProduction())
return true;
// Production extras: respect declared parallelism and change-window (block) settings.
if ( ! job.concurrentlyWith.containsAll(runningProductionJobsFor(application)))
return false;
if ( ! job.change.effectiveAt(application.deploymentSpec(), clock.instant()).isPresent())
return false;
return true;
}
/** The production jobs of the application which are currently running. */
private List<JobType> runningProductionJobsFor(Application application) {
// NOTE(review): parallelStream over a small keySet — presumably to overlap the
// per-job build-service lookups in isRunning; confirm this is intentional.
return application.deploymentJobs().jobStatus().keySet().parallelStream()
.filter(job -> job.isProduction())
.filter(job -> isRunning(application, job))
.collect(toList());
}
private ApplicationController applications() {
return controller.applications();
}
/** Whether a freshly built application version should replace the current change, rather than being parked. */
private boolean acceptNewApplicationVersion(LockedApplication application) {
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
return ! application.changeAt(clock.instant()).platform().isPresent();
}
/** The current deployment in the zone of the given job type, if any. */
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
/** Builds the source/target version state for a (prospective) deployment. */
private State targetFor(Application application, Change change, Optional<Deployment> deployment) {
return new State(targetPlatform(application, change, deployment),
targetApplication(application, change, deployment),
deployment.map(Deployment::version),
deployment.map(Deployment::applicationVersion));
}
/** A concrete, triggerable job for an application, together with why and when it became ready. */
private static class Job extends BuildJob {
private final Change change;
private final JobType jobType;
private final String reason;
// When the job became available; used for oldest-first ordering of test jobs.
private final Instant availableSince;
// Production jobs allowed to run concurrently with this one, per the deployment spec.
private final Collection<JobType> concurrentlyWith;
private final boolean isRetry;
private final boolean isApplicationUpgrade;
private final State target;
private Job(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry) {
super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
this.change = change;
this.jobType = jobType;
this.availableSince = availableSince;
this.concurrentlyWith = concurrentlyWith;
this.reason = reason;
this.isRetry = isRetry;
this.isApplicationUpgrade = change.application().isPresent();
this.target = target;
}
JobType jobType() { return jobType; }
Instant availableSince() { return availableSince; }
boolean isRetry() { return isRetry; }
boolean applicationUpgrade() { return isApplicationUpgrade; }
}
/**
 * The source and target platform and application versions of a (prospective) deployment.
 * NOTE(review): value class without equals/hashCode — comparisons elsewhere go field by field (see targetsMatch).
 */
public static class State {
private final Version targetPlatform;
private final ApplicationVersion targetApplication;
// Sources are empty for fresh deployments, present for upgrades.
private final Optional<Version> sourcePlatform;
private final Optional<ApplicationVersion> sourceApplication;
public State(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
this.targetPlatform = targetPlatform;
this.targetApplication = targetApplication;
this.sourcePlatform = sourcePlatform;
this.sourceApplication = sourceApplication;
}
public Version targetPlatform() { return targetPlatform; }
public ApplicationVersion targetApplication() { return targetApplication; }
public Optional<Version> sourcePlatform() { return sourcePlatform; }
public Optional<ApplicationVersion> sourceApplication() { return sourceApplication; }
@Override
public String toString() {
// Sources are shown only when they differ from the corresponding target.
return String.format("platform %s%s, application %s%s",
targetPlatform,
sourcePlatform.filter(version -> ! version.equals(targetPlatform))
.map(v -> " (from " + v + ")").orElse(""),
targetApplication.id(),
sourceApplication.filter(version -> ! version.equals(targetApplication))
.map(v -> " (from " + v.id() + ")").orElse(""));
}
}
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
// Maps deployment-spec zones to their corresponding jobs.
private final DeploymentOrder order;
private final BuildService buildService;
/**
 * Creates a deployment trigger.
 *
 * @throws NullPointerException if any argument is null
 */
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
    // Not stored, but still part of the construction contract.
    Objects.requireNonNull(curator, "curator cannot be null");
    this.controller = Objects.requireNonNull(controller, "controller cannot be null");
    this.clock = Objects.requireNonNull(clock, "clock cannot be null");
    // Was previously unchecked, unlike the other arguments; it is dereferenced by trigger() and isRunning().
    this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null");
    this.order = new DeploymentOrder(controller::system);
}
/** Returns the deployment order used to map zones to jobs. */
public DeploymentOrder deploymentOrder() {
return order;
}
// NOTE(review): dangling javadoc — the completion-handling method it documented is not adjacent here.
/**
 * Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
 */
/**
 * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
 *
 * Only one job is triggered each run for test jobs, since their environments have limited capacity.
 */
public long triggerReadyJobs() {
// Partition ready jobs: test jobs (key true) vs. production jobs (key false).
return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
.entrySet().stream()
.flatMap(entry -> (entry.getKey()
// Test jobs: retries and application upgrades first, then oldest first, grouped per job type.
? entry.getValue().stream()
.sorted(comparing(Job::isRetry)
.thenComparing(Job::applicationUpgrade)
.reversed()
.thenComparing(Job::availableSince))
.collect(groupingBy(Job::jobType))
// Production jobs: grouped per application.
: entry.getValue().stream()
.collect(groupingBy(Job::applicationId)))
.values().stream()
// At most one test job per type is triggered; production jobs have no such limit.
.map(jobs -> (Supplier<Long>) jobs.stream()
.filter(job -> canTrigger(job) && trigger(job))
.limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
.parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
* Attempts to trigger the given job for the given application and returns the outcome.
*
* If the build service can not find the given job, or claims it is illegal to trigger it,
* the project id is removed from the application owning the job, to prevent further trigger attemps.
*/
public boolean trigger(Job job) {
    log.log(LogLevel.INFO, String.format("Attempting to trigger %s: %s (%s)", job, job.reason, job.target));
    try {
        buildService.trigger(job);
        // Record the triggering, with its exact target and source versions, on the stored application.
        applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(
                job.jobType, JobRun.triggering(job.target.targetPlatform, job.target.targetApplication, job.target.sourcePlatform, job.target.sourceApplication, job.reason, clock.instant()))));
        return true;
    }
    catch (RuntimeException e) {
        log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
        // Unknown or illegal job in the build service: clear the project id to stop further attempts.
        if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
            applications().lockOrThrow(job.applicationId(), application ->
                    applications().store(application.withProjectId(OptionalLong.empty())));
        return false;
    }
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already has an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
    applications().lockOrThrow(applicationId, application -> {
        // Refuse to start a new change while another is rolling out, unless that one is failing.
        if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
            throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                               application.change() + " is already in progress");
        // Starting an application change also consumes any outstanding one.
        if (change.application().isPresent())
            application = application.withChange(change).withOutstandingChange(Change.empty());
        else
            application = application.withChange(change);
        applications().store(application);
    });
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
    applications().lockOrThrow(applicationId, application -> {
        // Keep only the application part of the current change, when asked to; otherwise clear it all.
        Change remaining = Change.empty();
        if (keepApplicationChange && application.change().application().isPresent())
            remaining = Change.of(application.change().application().get());
        applications().store(application.withChange(remaining));
    });
}
/** Returns the jobs which are currently ready to run, grouped by their job type. */
public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
    return computeReadyJobs().collect(groupingBy(Job::jobType));
}
/** Returns a stream of all jobs which have changes to propagate from the upstream steps. */
private Stream<Job> computeReadyJobs() {
    // Applications considered: not pull-request instances, with a build project, and with a change underway.
    return ApplicationList.from(applications().asList())
                          .notPullRequest()
                          .withProjectId()
                          .deploying()
                          .idList()
                          .stream()
                          .flatMap(id -> computeReadyJobs(id).stream());
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
public boolean isRunning(Application application, JobType jobType) {
    // If our own records show a completion after the last triggering, the job is not running and
    // the build service need not be asked.
    // NOTE(review): lastTriggered().get() assumes a recorded completion implies a recorded triggering — verify.
    return ! application.deploymentJobs().statusOf(jobType)
            .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))).orElse(false)
           && buildService.isRunning(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
}
/** Triggers the given job for the given application regardless of readiness, and returns the job types actually triggered. */
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType) {
    Application application = applications().require(applicationId);
    if (jobType == component) {
        // The component (build) job has no deployment target; just ask the build service to run it.
        buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
        return singletonList(component);
    }
    State target = targetFor(application, application.change(), deploymentFor(application, jobType));
    String reason = ">:o:< Triggered by force! (-o-) |-o-| (=oo=)";
    if (isTested(application, target, jobType)) {
        trigger(deploymentJob(application, target, application.change(), jobType, reason, clock.instant(), Collections.emptySet()));
        return singletonList(jobType);
    }
    // Not yet tested for this target: trigger the required test jobs instead of the requested job.
    List<Job> testJobs = testJobsFor(application, target, reason, clock.instant());
    testJobs.forEach(this::trigger);
    return testJobs.stream().map(Job::jobType).collect(toList());
}
/** Creates a deployment job for the given target, marking it as a retry when it last failed on capacity. */
private Job deploymentJob(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
    Optional<JobError> lastError = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError);
    boolean isRetry = lastError.filter(JobError.outOfCapacity::equals).isPresent();
    String fullReason = isRetry ? reason + "; retrying on out of capacity" : reason;
    return new Job(application, target, change, jobType, fullReason, availableSince, concurrentlyWith, isRetry);
}
/** Returns the platform version to deploy: the newest of the deployed and the change's versions, falling back to the oldest deployed, then the system version. */
private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
    return max(deployment.map(Deployment::version), change.platform())
            .orElse(application.oldestDeployedPlatform()
                               .orElse(controller.systemVersion()));
}
/** Returns the application version to deploy, analogously to targetPlatform. */
private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
    // NOTE(review): the final fallback assumes the component job always has a last success here — verify.
    return max(deployment.map(Deployment::applicationVersion), change.application())
            .orElse(application.oldestDeployedApplication()
                               .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application()));
}
/** Returns the greater of the two given values; the present one if only one is present, and empty if neither is. */
private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
    if ( ! o1.isPresent()) return o2;
    if ( ! o2.isPresent()) return o1;
    return o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
}
/**
* Finds the next step to trigger for the given application, if any, and returns these as a list.
*/
private List<Job> computeReadyJobs(ApplicationId id) {
    List<Job> jobs = new ArrayList<>();
    applications().get(id).ifPresent(application -> {
        // An empty deployment spec implicitly means a lone test zone.
        List<Step> steps = application.deploymentSpec().steps().isEmpty()
                ? singletonList(new DeploymentSpec.DeclaredZone(test))
                : application.deploymentSpec().steps();
        // Production steps, including delay steps (which declare no zones).
        List<Step> productionSteps = steps.stream().filter(step -> step.deploysTo(prod) || step.zones().isEmpty()).collect(toList());
        // The change becomes available for production when staging tests last succeeded.
        Optional<Instant> completedAt = application.deploymentJobs().statusOf(stagingTest)
                .flatMap(JobStatus::lastSuccess).map(JobRun::at);
        String reason = "New change available";
        List<Job> testJobs = null; // null means "not yet decided", as opposed to the empty list
        for (Step step : productionSteps) {
            Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
            // Group this step's jobs by completion instant; the empty() key holds the incomplete ones.
            Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(application.change(), application, job)));
            if (jobsByCompletion.containsKey(empty())) {
                // Some jobs of this step remain to run.
                for (JobType job : jobsByCompletion.get(empty())) {
                    State target = targetFor(application, application.change(), deploymentFor(application, job));
                    if (isTested(application, target, job)) {
                        // Ready for production, provided the previous step has completed.
                        if (completedAt.isPresent())
                            jobs.add(deploymentJob(application, target, application.change(), job, reason, completedAt.get(), stepJobs));
                    }
                    else if (testJobs == null) {
                        // Target not yet tested: compute the test jobs to run first, once only.
                        if ( ! alreadyTriggered(application, target))
                            testJobs = testJobsFor(application, target, "Testing deployment for " + job.jobName(), completedAt.orElse(clock.instant()));
                        else
                            testJobs = emptyList();
                    }
                }
            }
            else {
                // All jobs of this step are complete; compute when, and update the reason for the next step.
                if (stepJobs.isEmpty()) {
                    // A delay step: completion is pushed forward, and may not have happened yet.
                    Duration delay = ((DeploymentSpec.Delay) step).duration();
                    completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                    reason += " after a delay of " + delay;
                }
                else {
                    completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
                    reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                }
            }
        }
        if (testJobs == null)
            // No production step required new tests; verify the current change outside prod instead.
            testJobs = testJobsFor(application, targetFor(application, application.change(), empty()), "Testing last changes outside prod", clock.instant());
        jobs.addAll(testJobs);
        // When every declared job has completed the change, clear it from the application.
        if (steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob)
                 .allMatch(job -> completedAt(application.change(), application, job).isPresent()))
            applications().lockIfPresent(id, lockedApplication -> applications().store(lockedApplication.withChange(Change.empty())));
    });
    return jobs;
}
/** Returns the test jobs to run to verify the given target, threading availability through completed tests. */
private List<Job> testJobsFor(Application application, State target, String reason, Instant availableSince) {
    List<Step> steps = application.deploymentSpec().steps();
    if (steps.isEmpty()) steps = singletonList(new DeploymentSpec.DeclaredZone(test));
    List<Job> jobs = new ArrayList<>();
    for (Step step : steps.stream().filter(step -> step.deploysTo(test) || step.deploysTo(staging)).collect(toList())) {
        for (JobType jobType : step.zones().stream().map(order::toJob).collect(toList())) {
            // For staging, a success must also match the target's sources, when these are given.
            Optional<JobRun> completion = successOn(application, jobType, target)
                    .filter(run -> jobType != stagingTest || sourcesMatchIfPresent(target, run));
            if (completion.isPresent())
                availableSince = completion.get().at(); // the next test becomes available at this completion
            else if (isTested(application, target, jobType))
                jobs.add(deploymentJob(application, target, application.change(), jobType, reason, availableSince, emptySet()));
        }
    }
    return jobs;
}
/** Returns whether the prerequisite tests for running the given job against the given target have passed. */
private boolean isTested(Application application, State target, JobType jobType) {
    if (jobType.environment() == staging)
        return successOn(application, systemTest, target).isPresent();
    if (jobType.environment() != prod)
        return true; // jobs outside staging and production have no test prerequisites
    return successOn(application, stagingTest, target)
                   .filter(run -> sourcesMatchIfPresent(target, run))
                   .isPresent()
           || alreadyTriggered(application, target);
}
/** If the given state's sources are present and differ from its targets, returns whether they are equal to those of the given job run. */
private static boolean sourcesMatchIfPresent(State target, JobRun jobRun) {
    // A source only constrains the match when it is present and differs from its target;
    // otherwise the corresponding half of the condition is vacuously true.
    return ( ! target.sourcePlatform.filter(version -> ! version.equals(target.targetPlatform)).isPresent()
             || target.sourcePlatform.equals(jobRun.sourcePlatform()))
           && ( ! target.sourceApplication.filter(version -> ! version.equals(target.targetApplication)).isPresent()
                || target.sourceApplication.equals(jobRun.sourceApplication()));
}
/** Returns whether the given run produced exactly the platform and application versions of the given state. */
private static boolean targetsMatch(State target, JobRun jobRun) {
    boolean samePlatform = target.targetPlatform.equals(jobRun.platform());
    boolean sameApplication = target.targetApplication.equals(jobRun.application());
    return samePlatform && sameApplication;
}
/** Returns the latest success instant of the test jobs for the given target, if any. */
private Optional<Instant> testedAt(Application application, State target) {
    Optional<Instant> systemTested = successOn(application, systemTest, target).map(JobRun::at);
    Optional<Instant> stagingTested = successOn(application, stagingTest, target)
            .filter(run -> sourcesMatchIfPresent(target, run))
            .map(JobRun::at);
    return max(systemTested, stagingTested);
}
/** Returns whether some production job was last triggered with exactly this target (and source, when given). */
private boolean alreadyTriggered(Application application, State target) {
    for (JobStatus status : application.deploymentJobs().jobStatus().values()) {
        if ( ! status.type().isProduction())
            continue;
        boolean triggeredWithTarget = status.lastTriggered()
                                            .filter(run -> targetsMatch(target, run))
                                            .filter(run -> sourcesMatchIfPresent(target, run))
                                            .isPresent();
        if (triggeredWithTarget)
            return true;
    }
    return false;
}
/**
* Returns the instant when the given change is complete for the given application for the given job.
*
* Any job is complete if the given change is already successful on that job.
* A production job is also considered complete if its current change is strictly dominated by what
* is already deployed in its zone, i.e., no parts of the change are upgrades, and at least one
* part is a downgrade, regardless of the status of the job.
*/
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
    State target = targetFor(application, change, deploymentFor(application, jobType));
    Optional<JobRun> lastSuccess = successOn(application, jobType, target);
    if (lastSuccess.isPresent() || ! jobType.isProduction())
        return lastSuccess.map(JobRun::at);
    // A production job is also complete when the deployed versions strictly dominate the change:
    // nothing in the change is an upgrade, and at least one part of it is a downgrade.
    return deploymentFor(application, jobType)
            .filter(deployment -> ! ( change.upgrades(deployment.version())
                                      || change.upgrades(deployment.applicationVersion()))
                                  && ( change.downgrades(deployment.version())
                                       || change.downgrades(deployment.applicationVersion())))
            .map(Deployment::at);
}
/** Returns the last success of the given job, provided it produced exactly the given target. */
private Optional<JobRun> successOn(Application application, JobType jobType, State target) {
    Optional<JobRun> lastSuccess = application.deploymentJobs().statusOf(jobType)
                                              .flatMap(JobStatus::lastSuccess);
    return lastSuccess.filter(run -> targetsMatch(target, run));
}
/** Returns whether the given job may be triggered now: not running, not already done, and not blocked. */
private boolean canTrigger(Job job) {
    Application application = applications().require(job.applicationId());
    if (isRunning(application, job.jobType))
        return false;
    // Already successful on this target (and source, when given): nothing to do.
    if (successOn(application, job.jobType, job.target).filter(run -> sourcesMatchIfPresent(job.target, run)).isPresent())
        return false;
    if ( ! job.jobType.isProduction())
        return true;
    // A production job may only run concurrently with the jobs of its own step.
    if ( ! job.concurrentlyWith.containsAll(runningProductionJobsFor(application)))
        return false;
    // The change must be effective right now, per the deployment spec — presumably change-blocking windows; verify.
    if ( ! job.change.effectiveAt(application.deploymentSpec(), clock.instant()).isPresent())
        return false;
    return true;
}
/** Returns the production jobs of the given application which are currently running. */
private List<JobType> runningProductionJobsFor(Application application) {
    // isRunning may consult the external build service, so this was a parallel stream; the number
    // of job types is tiny, however, and sequential evaluation avoids concurrent remote calls
    // for no measurable gain.
    return application.deploymentJobs().jobStatus().keySet().stream()
                      .filter(JobType::isProduction)
                      .filter(job -> isRunning(application, job))
                      .collect(toList());
}
/** Returns the application controller owned by the controller this runs in. */
private ApplicationController applications() {
    return controller.applications();
}
/** Returns whether a new application version may be rolled out for the given application. */
private boolean acceptNewApplicationVersion(LockedApplication application) {
    // Accept when an application change is already underway, when jobs are failing,
    // or when no platform change is effective at this time.
    return application.change().application().isPresent()
           || application.deploymentJobs().hasFailures()
           || ! application.changeAt(clock.instant()).platform().isPresent();
}
/** Returns the deployment of the given application in the zone of the given job, if any. */
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
    // NOTE(review): zone(...).get() assumes this is only called for jobs with a zone in this system — verify.
    return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
/** Returns the state to deploy: target versions from the change, source versions from the current deployment. */
private State targetFor(Application application, Change change, Optional<Deployment> deployment) {
    return new State(targetPlatform(application, change, deployment),
                     targetApplication(application, change, deployment),
                     deployment.map(Deployment::version),
                     deployment.map(Deployment::applicationVersion));
}
/** A build job enriched with the target state, change, and triggering metadata it was computed for. */
private static class Job extends BuildJob {

    private final Change change;
    private final JobType jobType;
    private final String reason;
    /** When the job became available to run; used for ordering in triggerReadyJobs. */
    private final Instant availableSince;
    /** The other jobs of the same step, which this one may run concurrently with (see canTrigger). */
    private final Collection<JobType> concurrentlyWith;
    private final boolean isRetry;
    private final boolean isApplicationUpgrade;
    private final State target;

    private Job(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry) {
        super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
        this.change = change;
        this.jobType = jobType;
        this.availableSince = availableSince;
        this.concurrentlyWith = concurrentlyWith;
        this.reason = reason;
        this.isRetry = isRetry;
        this.isApplicationUpgrade = change.application().isPresent();
        this.target = target;
    }

    JobType jobType() { return jobType; }
    Instant availableSince() { return availableSince; }
    boolean isRetry() { return isRetry; }
    boolean applicationUpgrade() { return isApplicationUpgrade; }

}
/** Holds the target — and, when a deployment exists, source — platform and application versions of a deployment. */
public static class State {

    private final Version targetPlatform;
    private final ApplicationVersion targetApplication;
    // Source versions are those of the deployment currently in the zone, when one exists.
    private final Optional<Version> sourcePlatform;
    private final Optional<ApplicationVersion> sourceApplication;

    public State(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
        this.targetPlatform = targetPlatform;
        this.targetApplication = targetApplication;
        this.sourcePlatform = sourcePlatform;
        this.sourceApplication = sourceApplication;
    }

    public Version targetPlatform() { return targetPlatform; }
    public ApplicationVersion targetApplication() { return targetApplication; }
    public Optional<Version> sourcePlatform() { return sourcePlatform; }
    public Optional<ApplicationVersion> sourceApplication() { return sourceApplication; }

    @Override
    public String toString() {
        // Sources are mentioned only when they differ from their targets.
        return String.format("platform %s%s, application %s%s",
                             targetPlatform,
                             sourcePlatform.filter(version -> ! version.equals(targetPlatform))
                                           .map(v -> " (from " + v + ")").orElse(""),
                             targetApplication.id(),
                             sourceApplication.filter(version -> ! version.equals(targetApplication))
                                              .map(v -> " (from " + v.id() + ")").orElse(""));
    }

}
} |
`if (false`? And the optional is always empty. | public void deactivate(Application application, ZoneId zone) {
Optional<Deployment> deployment = Optional.empty();
if (false && deployment.isPresent()
&& ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant()))
return;
lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone)));
} | if (false && deployment.isPresent() | public void deactivate(Application application, ZoneId zone) {
lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone)));
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final RotationRepository rotationRepository;
private final AthenzClientFactory zmsClientFactory;
private final NameService nameService;
private final ConfigServer configServer;
private final RoutingGenerator routingGenerator;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
ApplicationController(Controller controller, CuratorDb curator,
                      AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
                      NameService nameService, ConfigServer configServer,
                      ArtifactRepository artifactRepository,
                      RoutingGenerator routingGenerator, BuildService buildService, Clock clock) {
    this.controller = controller;
    this.curator = curator;
    this.zmsClientFactory = zmsClientFactory;
    this.nameService = nameService;
    this.configServer = configServer;
    this.routingGenerator = routingGenerator;
    this.clock = clock;
    this.artifactRepository = artifactRepository;
    this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
    this.deploymentTrigger = new DeploymentTrigger(controller, curator, buildService, clock);
    // Re-store every known application on construction — presumably to rewrite stored data
    // into the current serialization format; TODO confirm.
    for (Application application : curator.readApplications()) {
        lockIfPresent(application.id(), this::store);
    }
}
/** Returns the application with the given id, or empty if it is not present */
public Optional<Application> get(ApplicationId id) {
    return curator.readApplication(id);
}

/**
 * Returns the application with the given id
 *
 * @throws IllegalArgumentException if it does not exist
 */
public Application require(ApplicationId id) {
    return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}

/** Returns a snapshot of all applications */
public List<Application> asList() {
    // sort is defined elsewhere in this class; keeps listings in a stable order.
    return sort(curator.readApplications());
}

/** Returns all applications of a tenant */
public List<Application> asList(TenantName tenant) {
    return sort(curator.readApplications(tenant));
}
/**
* Set the rotations marked as 'global' either 'in' or 'out of' service.
*
* @return The canonical endpoint altered if any
* @throws IOException if rotation status cannot be updated
*/
public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
    Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
    List<String> updatedRotations = new ArrayList<>();
    if ( ! endpoint.isPresent())
        return updatedRotations; // no global endpoint: nothing to update
    configServer.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
    updatedRotations.add(endpoint.get());
    return updatedRotations;
}
/**
* Get the endpoint status for the global endpoint of this application
*
* @return Map between the endpoint and the rotation status
* @throws IOException if global rotation status cannot be determined
*/
public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
    Map<String, EndpointStatus> statusByEndpoint = new HashMap<>();
    Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
    if ( ! endpoint.isPresent())
        return statusByEndpoint; // no global endpoint for this deployment
    statusByEndpoint.put(endpoint.get(), configServer.getGlobalRotationStatus(deploymentId, endpoint.get()));
    return statusByEndpoint;
}
/**
* Global rotations (plural as we can have aliases) map to exactly one service endpoint.
* This method finds that one service endpoint and strips the URI part that
* the routingGenerator is wrapping around the endpoint.
*
* @param deploymentId The deployment to retrieve global service endpoint for
* @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
*/
Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
    Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
    Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
    for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
        try {
            URI uri = new URI(endpoint.getEndpoint());
            String serviceEndpoint = uri.getHost();
            if (serviceEndpoint == null) {
                throw new IOException("Unexpected endpoints returned from the Routing Generator");
            }
            // Strip the URI wrapper around the service endpoint. Previously done with replaceAll,
            // which treats its argument as a regex: the unescaped dots could match any character.
            // replace does the intended literal substitution.
            String canonicalEndpoint = serviceEndpoint.replace(".vespa.yahooapis.com", "");
            String hostname = endpoint.getHostname();
            if (hostname != null) {
                if (endpoint.isGlobal()) {
                    hostToGlobalEndpoint.put(hostname, endpoint);
                } else {
                    hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                }
                // A host seen with both a global and a non-global endpoint identifies the service endpoint.
                if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                    return Optional.of(hostToCanonicalEndpoint.get(hostname));
                }
            }
        } catch (URISyntaxException use) {
            throw new IOException(use);
        }
    }
    return Optional.empty();
}
/**
* Creates a new application for an existing tenant.
*
* @throws IllegalArgumentException if the application already exists
*/
public Application createApplication(ApplicationId id, Optional<NToken> token) {
    if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
        throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
    try (Lock lock = lock(id)) {
        // Validate the application name only when this is the first instance of the application.
        if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
        Optional<Tenant> tenant = controller.tenants().tenant(id.tenant());
        if ( ! tenant.isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
        if (get(id).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
        // Dashes and underscores are not allowed to collide in application names.
        if (get(dashToUnderscore(id)).isPresent())
            throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
        // The default instance of an Athenz tenant is also registered with Athenz, which requires an NToken.
        if (id.instance().isDefault() && tenant.get() instanceof AthenzTenant) {
            if ( ! token.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
            ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
            zmsClient.addApplication(((AthenzTenant) tenant.get()).domain(),
                                     new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        LockedApplication application = new LockedApplication(new Application(id), lock);
        store(application);
        log.info("Created " + application);
        return application;
    }
}
/** Deploys an application. If the application does not exist it is created. */
public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone,
                                        Optional<ApplicationPackage> applicationPackageFromDeployer,
                                        DeployOptions options) {
    try (Lock lock = lock(applicationId)) {
        LockedApplication application = get(applicationId)
                .map(app -> new LockedApplication(app, lock))
                .orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock));
        // Deployments without a screwdriver build job, and manual ones, bypass the pipeline.
        boolean canDeployDirectly = ! options.screwdriverBuildJob.map(job1 -> job1.screwdriverId).isPresent()
                                    || zone.environment().isManuallyDeployed();
        boolean preferOldestVersion = options.deployCurrentVersion;
        Version platformVersion;
        ApplicationVersion applicationVersion;
        ApplicationPackage applicationPackage;
        if (canDeployDirectly) {
            // Direct deployments use the given package and the requested (or system) platform version.
            platformVersion = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
            applicationVersion = ApplicationVersion.unknown;
            applicationPackage = applicationPackageFromDeployer.orElseThrow(
                    () -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
        } else {
            // Pipeline deployments use the versions of the last triggering of the job for this zone.
            JobType jobType = JobType.from(controller.system(), zone)
                                     .orElseThrow(() -> new IllegalArgumentException("No job found for zone " + zone));
            Optional<JobRun> triggered = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
                                                 .flatMap(JobStatus::lastTriggered);
            if ( ! triggered.isPresent())
                return unexpectedDeployment(applicationId, zone);
            // When deploying the current version, prefer the triggering's source versions.
            platformVersion = preferOldestVersion
                    ? triggered.get().sourcePlatform().orElse(triggered.get().platform())
                    : triggered.get().platform();
            applicationVersion = preferOldestVersion
                    ? triggered.get().sourceApplication().orElse(triggered.get().application())
                    : triggered.get().application();
            applicationPackage = new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), applicationVersion.id()));
            validateRun(application, zone, platformVersion, applicationVersion);
        }
        validate(applicationPackage.deploymentSpec());
        // Update the stored application from the package, unless deploying a previous version.
        if ( ! preferOldestVersion) {
            application = application.with(applicationPackage.deploymentSpec());
            application = application.with(applicationPackage.validationOverrides());
            application = deleteRemovedDeployments(application);
            application = deleteUnreferencedDeploymentJobs(application);
            // Store before deploying — presumably so spec updates survive a failed deployment; verify.
            store(application);
        }
        // Assign a rotation when eligible, and collect rotation names for the config server request.
        application = withRotation(application, zone);
        Set<String> rotationNames = new HashSet<>();
        Set<String> cnames = new HashSet<>();
        application.rotation().ifPresent(applicationRotation -> {
            rotationNames.add(applicationRotation.id().asString());
            cnames.add(applicationRotation.dnsName());
            cnames.add(applicationRotation.secureDnsName());
        });
        options = withVersion(platformVersion, options);
        DeploymentId deploymentId = new DeploymentId(applicationId, zone);
        ConfigServer.PreparedApplication preparedApplication =
                configServer.deploy(deploymentId, options, cnames, rotationNames, applicationPackage.zippedContent());
        // Record the new deployment on the application before returning the activation result.
        application = application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant());
        store(application);
        return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                  applicationPackage.zippedContent().length);
    }
}
/** Makes sure the application has a global rotation, if eligible. */
private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
    // Only production deployments of applications declaring a global service id get a rotation.
    if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) {
        try (RotationLock rotationLock = rotationRepository.lock()) {
            Rotation rotation = rotationRepository.getOrAssignRotation(application, rotationLock);
            application = application.with(rotation.id());
            // Store before registering DNS — presumably so the assignment survives a DNS failure; verify.
            store(application);
            registerRotationInDns(rotation, application.rotation().get().dnsName());
            registerRotationInDns(rotation, application.rotation().get().secureDnsName());
        }
    }
    return application;
}
/** Returns a no-op activation result carrying a warning log entry, for deployments which were not triggered by us. */
private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone) {
    Log logEntry = new Log();
    logEntry.level = "WARNING";
    logEntry.time = clock.instant().toEpochMilli();
    logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone +
                       " as a deployment is not currently expected";
    PrepareResponse prepareResponse = new PrepareResponse();
    prepareResponse.log = Collections.singletonList(logEntry);
    prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
    return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
}
/** Deactivates production deployments which are no longer declared in the deployment spec, when allowed by an override. */
private LockedApplication deleteRemovedDeployments(LockedApplication application) {
    List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
            .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
                                                                          Optional.of(deployment.zone().region())))
            .collect(Collectors.toList());
    if (deploymentsToRemove.isEmpty()) return application;
    // Removing deployments is destructive, so it must be explicitly allowed by a validation override.
    if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
        throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
                                           " is deployed in " +
                                           deploymentsToRemove.stream()
                                                              .map(deployment -> deployment.zone().region().value())
                                                              .collect(Collectors.joining(", ")) +
                                           ", but does not include " +
                                           (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                           " in deployment.xml");
    LockedApplication applicationWithRemoval = application;
    for (Deployment deployment : deploymentsToRemove)
        applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
    return applicationWithRemoval;
}
/** Removes production job records whose zones are no longer declared in the deployment spec. */
private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
    for (JobType job : application.deploymentJobs().jobStatus().keySet()) {
        Optional<ZoneId> zone = job.zone(controller.system());
        boolean referenced = ! job.isProduction()
                             || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region)));
        if ( ! referenced)
            application = application.withoutDeploymentJob(job);
    }
    return application;
}
/** Returns a copy of the given deploy options, with the vespa version set to the given version. */
private DeployOptions withVersion(Version version, DeployOptions options) {
    Optional<Version> vespaVersion = Optional.of(version);
    return new DeployOptions(options.screwdriverBuildJob, vespaVersion, options.ignoreValidationErrors, options.deployCurrentVersion);
}
/** Register a DNS name for rotation */
private void registerRotationInDns(Rotation rotation, String dnsName) {
    try {
        Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName));
        RecordData rotationName = RecordData.fqdn(rotation.name());
        if (record.isPresent()) {
            // Update an existing CNAME only when it points somewhere else.
            if ( ! record.get().data().equals(rotationName)) {
                nameService.updateRecord(record.get().id(), rotationName);
                log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName
                         + "' -> '" + rotation.name() + "'");
            }
        } else {
            RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
            log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '"
                     + rotation.name() + "'");
        }
    } catch (RuntimeException e) {
        // DNS registration is best effort: failures are logged and do not abort the caller.
        log.log(Level.WARNING, "Failed to register CNAME", e);
    }
}
/** Returns the endpoints of the deployment, or empty if the request fails */
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
    try {
        return Optional.of(ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
                                                                .map(RoutingEndpoint::getEndpoint)
                                                                .map(URI::create)
                                                                .iterator()));
    }
    catch (RuntimeException e) {
        // Log and return empty rather than propagate, leaving the caller to handle absence.
        log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
                               + Exceptions.toMessageString(e));
        return Optional.empty();
    }
}
/**
* Deletes the given application. All known instances of the application will be deleted,
* including PR instances.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
* @throws NotExistsException if no instances of the application exist
*/
public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) {
    // All instances of the application under the same tenant, including PR instances.
    List<ApplicationId> instances = controller.applications().asList(applicationId.tenant())
                                              .stream()
                                              .map(Application::id)
                                              .filter(id -> id.application().equals(applicationId.application()) &&
                                                            id.tenant().equals(applicationId.tenant()))
                                              .collect(Collectors.toList());
    if (instances.isEmpty()) {
        throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
    }
    instances.forEach(id -> lockOrThrow(id, application -> {
        if ( ! application.deployments().isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
        Tenant tenant = controller.tenants().tenant(id.tenant()).get();
        if (tenant instanceof AthenzTenant && ! token.isPresent())
            throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");
        // The default instance of an Athenz tenant is also removed from Athenz.
        if (id.instance().isDefault() && tenant instanceof AthenzTenant) {
            zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
                            .deleteApplication(((AthenzTenant) tenant).domain(),
                                               new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
        }
        curator.removeApplication(id);
        log.info("Deleted " + application);
    }));
}
/**
 * Replaces any previously stored version of this application with the given instance.
 *
 * @param application a locked application to store; holding the lock ensures no concurrent writer
 */
public void store(LockedApplication application) {
    curator.writeApplication(application);
}
/**
* Acquire a locked application to modify and store, if there is an application with the given id.
*
* @param applicationId ID of the application to lock and get.
* @param action Function which acts on the locked application.
*/
public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
}
}
/**
* Acquire a locked application to modify and store, or throw an exception if no application has the given id.
*
* @param applicationId ID of the application to lock and require.
* @param action Function which acts on the locked application.
* @throws IllegalArgumentException when application does not exist.
*/
public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
action.accept(new LockedApplication(require(applicationId), lock));
}
}
/**
* Tells config server to schedule a restart of all nodes in this deployment
*
* @param hostname If non-empty, restart will only be scheduled for this host
*/
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
try {
configServer.restart(deploymentId, hostname);
}
catch (NoInstanceException e) {
throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
}
}
/** Deactivate application in the given zone */
/**
* Deactivates a locked application without storing it
*
* @return the application with the deployment in the given zone removed
*/
private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
try {
configServer.deactivate(new DeploymentId(application.id(), zone));
}
catch (NoInstanceException ignored) {
}
return application.withoutDeploymentIn(zone);
}
    /** Returns the deployment trigger owned by this controller. */
    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
private ApplicationId dashToUnderscore(ApplicationId id) {
return ApplicationId.from(id.tenant().value(),
id.application().value().replaceAll("-", "_"),
id.instance().value());
}
    /** Returns the config server client used by this controller. */
    public ConfigServer configServer() { return configServer; }
    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(ApplicationId application) {
        // 10 minutes is presumably the acquisition timeout — confirm against CuratorDb.lock.
        return curator.lock(application, Duration.ofMinutes(10));
    }
/** Verify that each of the production zones listed in the deployment spec exist in this system. */
private void validate(DeploymentSpec deploymentSpec) {
deploymentSpec.zones().stream()
.filter(zone -> zone.environment() == Environment.prod)
.forEach(zone -> {
if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
zone.region().orElse(null)))) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
}
});
}
    /** Verify that we don't downgrade an existing production deployment. */
    private void validateRun(Application application, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
        Deployment deployment = application.deployments().get(zone);
        // Only production deployments are protected; other environments may redeploy older versions.
        if ( zone.environment().isProduction() && deployment != null
             && ( platformVersion.compareTo(deployment.version()) < 0
                  || applicationVersion.compareTo(deployment.applicationVersion()) < 0))
            throw new IllegalArgumentException(String.format("Rejecting deployment of %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                             " are older than the currently deployed (platform: %s, application: %s).",
                                                             application, zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
    }
    /** Returns the rotation repository owned by this controller. */
    public RotationRepository rotationRepository() {
        return rotationRepository;
    }
/** Sort given list of applications by application ID */
private static List<Application> sort(List<Application> applications) {
return applications.stream().sorted(Comparator.comparing(Application::id)).collect(Collectors.toList());
}
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final RotationRepository rotationRepository;
private final AthenzClientFactory zmsClientFactory;
private final NameService nameService;
private final ConfigServer configServer;
private final RoutingGenerator routingGenerator;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
    /** Creates an application controller backed by the given curator database and service clients. */
    ApplicationController(Controller controller, CuratorDb curator,
                          AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
                          NameService nameService, ConfigServer configServer,
                          ArtifactRepository artifactRepository,
                          RoutingGenerator routingGenerator, BuildService buildService, Clock clock) {
        this.controller = controller;
        this.curator = curator;
        this.zmsClientFactory = zmsClientFactory;
        this.nameService = nameService;
        this.configServer = configServer;
        this.routingGenerator = routingGenerator;
        this.clock = clock;
        this.artifactRepository = artifactRepository;
        this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
        this.deploymentTrigger = new DeploymentTrigger(controller, curator, buildService, clock);
        // Re-store every persisted application on startup — presumably to rewrite stored data
        // in the current serialization format. TODO confirm.
        for (Application application : curator.readApplications()) {
            lockIfPresent(application.id(), this::store);
        }
    }
    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> get(ApplicationId id) {
        return curator.readApplication(id);
    }
/**
* Returns the application with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Application require(ApplicationId id) {
return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
    /** Returns a snapshot of all applications, sorted by application id */
    public List<Application> asList() {
        return sort(curator.readApplications());
    }
    /** Returns all applications of a tenant, sorted by application id */
    public List<Application> asList(TenantName tenant) {
        return sort(curator.readApplications(tenant));
    }
/**
* Set the rotations marked as 'global' either 'in' or 'out of' service.
*
* @return The canonical endpoint altered if any
* @throws IOException if rotation status cannot be updated
*/
public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
List<String> rotations = new ArrayList<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
if (endpoint.isPresent()) {
configServer.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
rotations.add(endpoint.get());
}
return rotations;
}
/**
* Get the endpoint status for the global endpoint of this application
*
* @return Map between the endpoint and the rotation status
* @throws IOException if global rotation status cannot be determined
*/
public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
Map<String, EndpointStatus> result = new HashMap<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
if (endpoint.isPresent()) {
EndpointStatus status = configServer.getGlobalRotationStatus(deploymentId, endpoint.get());
result.put(endpoint.get(), status);
}
return result;
}
    /**
     * Global rotations (plural as we can have aliases) map to exactly one service endpoint.
     * This method finds that one service endpoint and strips the URI part that
     * the routingGenerator is wrapping around the endpoint.
     *
     * @param deploymentId The deployment to retrieve global service endpoint for
     * @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
     * @throws IOException if an endpoint is malformed or lacks a host part
     */
    Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
        Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
        Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
        for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
            try {
                URI uri = new URI(endpoint.getEndpoint());
                String serviceEndpoint = uri.getHost();
                if (serviceEndpoint == null) {
                    throw new IOException("Unexpected endpoints returned from the Routing Generator");
                }
                // Strip the wrapping domain to obtain the canonical service endpoint.
                // NOTE(review): replaceAll treats the argument as a regex, so the dots here match any
                // character — presumably harmless for these host names, but worth confirming.
                String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", "");
                String hostname = endpoint.getHostname();
                if (hostname != null) {
                    if (endpoint.isGlobal()) {
                        hostToGlobalEndpoint.put(hostname, endpoint);
                    } else {
                        hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
                    }
                    // A host seen with both a global and a non-global endpoint identifies the one
                    // canonical endpoint the global rotation maps to.
                    if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
                        return Optional.of(hostToCanonicalEndpoint.get(hostname));
                    }
                }
            } catch (URISyntaxException use) {
                throw new IOException(use);
            }
        }
        return Optional.empty();
    }
    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(ApplicationId id, Optional<NToken> token) {
        if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
            throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
        try (Lock lock = lock(id)) {
            // Validate the application name only when no other instance of it exists yet.
            if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
                com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
            Optional<Tenant> tenant = controller.tenants().tenant(id.tenant());
            if ( ! tenant.isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            if (get(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            // Names differing only by dash vs underscore collide, so reject those as well.
            if (get(dashToUnderscore(id)).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
            // Only the 'default' instance of an Athenz tenant is registered in Athenz, and requires a token.
            if (id.instance().isDefault() && tenant.get() instanceof AthenzTenant) {
                if ( ! token.isPresent())
                    throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
                ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
                zmsClient.addApplication(((AthenzTenant) tenant.get()).domain(),
                                         new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            }
            LockedApplication application = new LockedApplication(new Application(id), lock);
            store(application);
            log.info("Created " + application);
            return application;
        }
    }
    /** Deploys an application. If the application does not exist it is created. */
    public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone,
                                            Optional<ApplicationPackage> applicationPackageFromDeployer,
                                            DeployOptions options) {
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = get(applicationId)
                    .map(app -> new LockedApplication(app, lock))
                    .orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock));
            // A deployment is direct when it is not made by a screwdriver build job,
            // or when it targets a manually deployed environment.
            boolean canDeployDirectly = ! options.screwdriverBuildJob.map(job1 -> job1.screwdriverId).isPresent()
                                        || zone.environment().isManuallyDeployed();
            boolean preferOldestVersion = options.deployCurrentVersion;
            Version platformVersion;
            ApplicationVersion applicationVersion;
            ApplicationPackage applicationPackage;
            if (canDeployDirectly) {
                // Direct deployments use the given package and version, falling back to the system version.
                platformVersion = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
                applicationVersion = ApplicationVersion.unknown;
                applicationPackage = applicationPackageFromDeployer.orElseThrow(
                        () -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
            } else {
                // Job-driven deployments must match the versions of the last triggered run of this zone's job.
                JobType jobType = JobType.from(controller.system(), zone)
                                         .orElseThrow(() -> new IllegalArgumentException("No job found for zone " + zone));
                Optional<JobRun> triggered = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
                                                     .flatMap(JobStatus::lastTriggered);
                if ( ! triggered.isPresent())
                    return unexpectedDeployment(applicationId, zone);
                platformVersion = preferOldestVersion
                        ? triggered.get().sourcePlatform().orElse(triggered.get().platform())
                        : triggered.get().platform();
                applicationVersion = preferOldestVersion
                        ? triggered.get().sourceApplication().orElse(triggered.get().application())
                        : triggered.get().application();
                applicationPackage = new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), applicationVersion.id()));
                validateRun(application, zone, platformVersion, applicationVersion);
            }
            validate(applicationPackage.deploymentSpec());
            // Update the stored application from the package, unless we are redeploying the current versions.
            if ( ! preferOldestVersion) {
                application = application.with(applicationPackage.deploymentSpec());
                application = application.with(applicationPackage.validationOverrides());
                application = deleteRemovedDeployments(application);
                application = deleteUnreferencedDeploymentJobs(application);
                store(application); // store missing information even if deployment fails
            }
            // Assign and collect global rotation names before handing the deployment to the config server.
            application = withRotation(application, zone);
            Set<String> rotationNames = new HashSet<>();
            Set<String> cnames = new HashSet<>();
            application.rotation().ifPresent(applicationRotation -> {
                rotationNames.add(applicationRotation.id().asString());
                cnames.add(applicationRotation.dnsName());
                cnames.add(applicationRotation.secureDnsName());
            });
            options = withVersion(platformVersion, options);
            DeploymentId deploymentId = new DeploymentId(applicationId, zone);
            ConfigServer.PreparedApplication preparedApplication =
                    configServer.deploy(deploymentId, options, cnames, rotationNames, applicationPackage.zippedContent());
            // Record the new deployment only after the config server has accepted it.
            application = application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant());
            store(application);
            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                      applicationPackage.zippedContent().length);
        }
    }
    /** Makes sure the application has a global rotation, if eligible. */
    private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
        // Only production deployments of applications declaring a global service id get a rotation.
        if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) {
            try (RotationLock rotationLock = rotationRepository.lock()) {
                Rotation rotation = rotationRepository.getOrAssignRotation(application, rotationLock);
                application = application.with(rotation.id());
                store(application); // store assignment before registering the names, so it is not lost
                // Register both the plain and the secure DNS name for the rotation.
                registerRotationInDns(rotation, application.rotation().get().dnsName());
                registerRotationInDns(rotation, application.rotation().get().secureDnsName());
            }
        }
        return application;
    }
private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone) {
Log logEntry = new Log();
logEntry.level = "WARNING";
logEntry.time = clock.instant().toEpochMilli();
logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone +
" as a deployment is not currently expected";
PrepareResponse prepareResponse = new PrepareResponse();
prepareResponse.log = Collections.singletonList(logEntry);
prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
}
    /**
     * Deactivates production deployments in zones no longer listed in the deployment spec,
     * provided the deployment-removal validation override allows it.
     */
    private LockedApplication deleteRemovedDeployments(LockedApplication application) {
        List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
                .filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
                                                                              Optional.of(deployment.zone().region())))
                .collect(Collectors.toList());
        if (deploymentsToRemove.isEmpty()) return application;
        // Removing a production deployment is destructive, so it must be explicitly allowed.
        if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(deployment -> deployment.zone().region().value())
                                                                  .collect(Collectors.joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml");
        LockedApplication applicationWithRemoval = application;
        for (Deployment deployment : deploymentsToRemove)
            applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
        return applicationWithRemoval;
    }
private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
for (JobType job : application.deploymentJobs().jobStatus().keySet()) {
Optional<ZoneId> zone = job.zone(controller.system());
if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region))))
continue;
application = application.withoutDeploymentJob(job);
}
return application;
}
    /** Returns a copy of the given deploy options with the given platform version set. */
    private DeployOptions withVersion(Version version, DeployOptions options) {
        return new DeployOptions(options.screwdriverBuildJob,
                                 Optional.of(version),
                                 options.ignoreValidationErrors,
                                 options.deployCurrentVersion);
    }
    /**
     * Registers the given DNS name as a CNAME pointing to the given rotation,
     * updating an existing record if it points elsewhere. Failures are logged
     * and swallowed, so callers proceed even when DNS registration fails.
     */
    private void registerRotationInDns(Rotation rotation, String dnsName) {
        try {
            Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName));
            RecordData rotationName = RecordData.fqdn(rotation.name());
            if (record.isPresent()) {
                // Update the existing record only if it points somewhere else.
                if ( ! record.get().data().equals(rotationName)) {
                    nameService.updateRecord(record.get().id(), rotationName);
                    log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName
                             + "' -> '" + rotation.name() + "'");
                }
            } else {
                RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
                log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '"
                         + rotation.name() + "'");
            }
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed to register CNAME", e);
        }
    }
/** Returns the endpoints of the deployment, or an empty list if the request fails */
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
try {
return Optional.of(ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
.map(RoutingEndpoint::getEndpoint)
.map(URI::create)
.iterator()));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
+ Exceptions.toMessageString(e));
return Optional.empty();
}
}
    /**
     * Deletes the given application. All known instances of the application will be deleted,
     * including PR instances.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if no instances of the application exist
     */
    public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) {
        // Collect every instance (including PR instances) with this tenant and application name.
        List<ApplicationId> instances = controller.applications().asList(applicationId.tenant())
                .stream()
                .map(Application::id)
                .filter(id -> id.application().equals(applicationId.application()) &&
                        id.tenant().equals(applicationId.tenant()))
                .collect(Collectors.toList());
        if (instances.isEmpty()) {
            throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
        }
        instances.forEach(id -> lockOrThrow(id, application -> {
            if ( ! application.deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
            Tenant tenant = controller.tenants().tenant(id.tenant()).get();
            if (tenant instanceof AthenzTenant && ! token.isPresent())
                throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");
            // Only the 'default' instance is registered in Athenz, so only it is removed there.
            if (id.instance().isDefault() && tenant instanceof AthenzTenant) {
                // token.get() is safe: the guard above ensures a token is present for Athenz tenants.
                zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
                        .deleteApplication(((AthenzTenant) tenant).domain(),
                                new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
            }
            curator.removeApplication(id);
            log.info("Deleted " + application);
        }));
    }
    /**
     * Replaces any previously stored version of this application by this instance.
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        curator.writeApplication(application);
    }
    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application; not invoked if the application is absent.
     */
    public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }
    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(require(applicationId), lock));
        }
    }
/**
* Tells config server to schedule a restart of all nodes in this deployment
*
* @param hostname If non-empty, restart will only be scheduled for this host
*/
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
try {
configServer.restart(deploymentId, hostname);
}
catch (NoInstanceException e) {
throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
}
}
    /**
     * Deactivates the deployment of the given application in the given zone, without storing the change.
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
        try {
            configServer.deactivate(new DeploymentId(application.id(), zone));
        }
        catch (NoInstanceException ignored) {
            // Already gone on the config server; still remove our record of the deployment below.
        }
        return application.withoutDeploymentIn(zone);
    }
    /** Returns the deployment trigger owned by this controller. */
    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
private ApplicationId dashToUnderscore(ApplicationId id) {
return ApplicationId.from(id.tenant().value(),
id.application().value().replaceAll("-", "_"),
id.instance().value());
}
    /** Returns the config server client used by this controller. */
    public ConfigServer configServer() { return configServer; }
    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(ApplicationId application) {
        // 10 minutes is presumably the acquisition timeout — confirm against CuratorDb.lock.
        return curator.lock(application, Duration.ofMinutes(10));
    }
/** Verify that each of the production zones listed in the deployment spec exist in this system. */
private void validate(DeploymentSpec deploymentSpec) {
deploymentSpec.zones().stream()
.filter(zone -> zone.environment() == Environment.prod)
.forEach(zone -> {
if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
zone.region().orElse(null)))) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
}
});
}
    /** Verify that we don't downgrade an existing production deployment. */
    private void validateRun(Application application, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
        Deployment deployment = application.deployments().get(zone);
        // Only production deployments are protected; other environments may redeploy older versions.
        if ( zone.environment().isProduction() && deployment != null
             && ( platformVersion.compareTo(deployment.version()) < 0
                  || applicationVersion.compareTo(deployment.applicationVersion()) < 0))
            throw new IllegalArgumentException(String.format("Rejecting deployment of %s to %s, as the requested versions (platform: %s, application: %s)" +
                                                             " are older than the currently deployed (platform: %s, application: %s).",
                                                             application, zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
    }
    /** Returns the rotation repository owned by this controller. */
    public RotationRepository rotationRepository() {
        return rotationRepository;
    }
/** Sort given list of applications by application ID */
private static List<Application> sort(List<Application> applications) {
return applications.stream().sorted(Comparator.comparing(Application::id)).collect(Collectors.toList());
}
} |
https://github.com/StarRocks/starrocks/pull/36152/files | private void onFinished(Database db, OlapTable targetTable) throws AlterCancelException {
try {
tmpPartitionNames = getTmpPartitionIds().stream()
.map(partitionId -> targetTable.getPartition(partitionId).getName())
.collect(Collectors.toList());
Map<String, Long> partitionLastVersion = Maps.newHashMap();
optimizeClause.getSourcePartitionIds().stream()
.map(partitionId -> targetTable.getPartition(partitionId)).forEach(
partition -> {
sourcePartitionNames.add(partition.getName());
partitionLastVersion.put(partition.getName(), partition.getSubPartitions().stream()
.mapToLong(PhysicalPartition::getVisibleVersion).sum());
}
);
boolean hasFailedTask = false;
for (OptimizeTask rewriteTask : rewriteTasks) {
if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.FAILED
|| partitionLastVersion.get(rewriteTask.getPartitionName()) != rewriteTask.getLastVersion()) {
LOG.info("optimize job {} rewrite task {} state {} failed or partition {} version {} change to {}",
jobId, rewriteTask.getName(), rewriteTask.getOptimizeTaskState(), rewriteTask.getPartitionName(),
rewriteTask.getLastVersion(), partitionLastVersion.get(rewriteTask.getPartitionName()));
sourcePartitionNames.remove(rewriteTask.getPartitionName());
tmpPartitionNames.remove(rewriteTask.getTempPartitionName());
targetTable.dropTempPartition(rewriteTask.getTempPartitionName(), true);
hasFailedTask = true;
}
}
if (sourcePartitionNames.isEmpty()) {
throw new AlterCancelException("all partitions rewrite failed");
}
if (hasFailedTask && (optimizeClause.getKeysDesc() != null || optimizeClause.getSortKeys() != null)) {
rewriteTasks.forEach(
rewriteTask -> targetTable.dropTempPartition(rewriteTask.getTempPartitionName(), true));
throw new AlterCancelException("optimize keysType or sort keys failed since some partitions rewrite failed");
}
Set<Tablet> sourceTablets = Sets.newHashSet();
sourcePartitionNames.forEach(name -> {
Partition partition = targetTable.getPartition(name);
for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
sourceTablets.addAll(index.getTablets());
}
});
allPartitionOptimized = false;
if (!hasFailedTask && optimizeClause.getDistributionDesc() != null) {
Set<String> targetPartitionNames = targetTable.getPartitionNames();
long targetPartitionNum = targetPartitionNames.size();
targetPartitionNames.retainAll(sourcePartitionNames);
if (targetPartitionNames.size() == targetPartitionNum && targetPartitionNum == sourcePartitionNames.size()) {
allPartitionOptimized = true;
} else if (optimizeClause.getDistributionDesc().getType() != targetTable.getDefaultDistributionInfo().getType()) {
throw new AlterCancelException("can not change distribution type of target table" +
"since partial partitions are not optimized");
}
}
PartitionInfo partitionInfo = targetTable.getPartitionInfo();
if (partitionInfo.isRangePartition() || partitionInfo.getType() == PartitionType.LIST) {
targetTable.replaceTempPartitions(sourcePartitionNames, tmpPartitionNames, true, false);
} else if (partitionInfo instanceof SinglePartitionInfo) {
Preconditions.checkState(sourcePartitionNames.size() == 1 && tmpPartitionNames.size() == 1);
targetTable.replacePartition(sourcePartitionNames.get(0), tmpPartitionNames.get(0));
} else {
throw new AlterCancelException("partition type " + partitionInfo.getType() + " is not supported");
}
ReplacePartitionOperationLog info = new ReplacePartitionOperationLog(db.getId(), targetTable.getId(),
sourcePartitionNames, tmpPartitionNames, true, false, partitionInfo instanceof SinglePartitionInfo);
GlobalStateMgr.getCurrentState().getEditLog().logReplaceTempPartition(info);
sourceTablets.forEach(GlobalStateMgr.getCurrentInvertedIndex()::markTabletForceDelete);
try {
GlobalStateMgr.getCurrentColocateIndex().updateLakeTableColocationInfo(targetTable,
true /* isJoin */, null /* expectGroupId */);
} catch (DdlException e) {
LOG.error("table {} update colocation info failed after insert overwrite, {}.", tableId, e.getMessage());
}
targetTable.lastSchemaUpdateTime.set(System.currentTimeMillis());
if (allPartitionOptimized) {
this.distributionInfo = optimizeClause.getDistributionDesc().toDistributionInfo(targetTable.getColumns());
targetTable.setDefaultDistributionInfo(distributionInfo);
}
targetTable.setState(OlapTableState.NORMAL);
LOG.info("optimize job {} finish replace partitions dbId:{}, tableId:{},"
+ "source partitions:{}, tmp partitions:{}, allOptimized:{}",
jobId, dbId, tableId, sourcePartitionNames, tmpPartitionNames, allPartitionOptimized);
} catch (Exception e) {
LOG.warn("replace partitions failed when insert overwrite into dbId:{}, tableId:{}",
dbId, tableId, e);
throw new AlterCancelException("replace partitions failed " + e);
}
} | LOG.error("table {} update colocation info failed after insert overwrite, {}.", tableId, e.getMessage()); | private void onFinished(Database db, OlapTable targetTable) throws AlterCancelException {
try {
tmpPartitionNames = getTmpPartitionIds().stream()
.map(partitionId -> targetTable.getPartition(partitionId).getName())
.collect(Collectors.toList());
Map<String, Long> partitionLastVersion = Maps.newHashMap();
optimizeClause.getSourcePartitionIds().stream()
.map(partitionId -> targetTable.getPartition(partitionId)).forEach(
partition -> {
sourcePartitionNames.add(partition.getName());
partitionLastVersion.put(partition.getName(), partition.getSubPartitions().stream()
.mapToLong(PhysicalPartition::getVisibleVersion).sum());
}
);
boolean hasFailedTask = false;
for (OptimizeTask rewriteTask : rewriteTasks) {
if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.FAILED
|| partitionLastVersion.get(rewriteTask.getPartitionName()) != rewriteTask.getLastVersion()) {
LOG.info("optimize job {} rewrite task {} state {} failed or partition {} version {} change to {}",
jobId, rewriteTask.getName(), rewriteTask.getOptimizeTaskState(), rewriteTask.getPartitionName(),
rewriteTask.getLastVersion(), partitionLastVersion.get(rewriteTask.getPartitionName()));
sourcePartitionNames.remove(rewriteTask.getPartitionName());
tmpPartitionNames.remove(rewriteTask.getTempPartitionName());
targetTable.dropTempPartition(rewriteTask.getTempPartitionName(), true);
hasFailedTask = true;
}
}
if (sourcePartitionNames.isEmpty()) {
throw new AlterCancelException("all partitions rewrite failed");
}
if (hasFailedTask && (optimizeClause.getKeysDesc() != null || optimizeClause.getSortKeys() != null)) {
rewriteTasks.forEach(
rewriteTask -> targetTable.dropTempPartition(rewriteTask.getTempPartitionName(), true));
throw new AlterCancelException("optimize keysType or sort keys failed since some partitions rewrite failed");
}
Set<Tablet> sourceTablets = Sets.newHashSet();
sourcePartitionNames.forEach(name -> {
Partition partition = targetTable.getPartition(name);
for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
sourceTablets.addAll(index.getTablets());
}
});
allPartitionOptimized = false;
if (!hasFailedTask && optimizeClause.getDistributionDesc() != null) {
Set<String> targetPartitionNames = targetTable.getPartitionNames();
long targetPartitionNum = targetPartitionNames.size();
targetPartitionNames.retainAll(sourcePartitionNames);
if (targetPartitionNames.size() == targetPartitionNum && targetPartitionNum == sourcePartitionNames.size()) {
allPartitionOptimized = true;
} else if (optimizeClause.getDistributionDesc().getType() != targetTable.getDefaultDistributionInfo().getType()) {
throw new AlterCancelException("can not change distribution type of target table" +
"since partial partitions are not optimized");
}
}
PartitionInfo partitionInfo = targetTable.getPartitionInfo();
if (partitionInfo.isRangePartition() || partitionInfo.getType() == PartitionType.LIST) {
targetTable.replaceTempPartitions(sourcePartitionNames, tmpPartitionNames, true, false);
} else if (partitionInfo instanceof SinglePartitionInfo) {
Preconditions.checkState(sourcePartitionNames.size() == 1 && tmpPartitionNames.size() == 1);
targetTable.replacePartition(sourcePartitionNames.get(0), tmpPartitionNames.get(0));
} else {
throw new AlterCancelException("partition type " + partitionInfo.getType() + " is not supported");
}
ReplacePartitionOperationLog info = new ReplacePartitionOperationLog(db.getId(), targetTable.getId(),
sourcePartitionNames, tmpPartitionNames, true, false, partitionInfo instanceof SinglePartitionInfo);
GlobalStateMgr.getCurrentState().getEditLog().logReplaceTempPartition(info);
sourceTablets.forEach(GlobalStateMgr.getCurrentInvertedIndex()::markTabletForceDelete);
try {
GlobalStateMgr.getCurrentColocateIndex().updateLakeTableColocationInfo(targetTable,
true /* isJoin */, null /* expectGroupId */);
} catch (DdlException e) {
LOG.error("table {} update colocation info failed after insert overwrite, {}.", tableId, e.getMessage());
}
targetTable.lastSchemaUpdateTime.set(System.currentTimeMillis());
if (allPartitionOptimized) {
this.distributionInfo = optimizeClause.getDistributionDesc().toDistributionInfo(targetTable.getColumns());
targetTable.setDefaultDistributionInfo(distributionInfo);
}
targetTable.setState(OlapTableState.NORMAL);
LOG.info("optimize job {} finish replace partitions dbId:{}, tableId:{},"
+ "source partitions:{}, tmp partitions:{}, allOptimized:{}",
jobId, dbId, tableId, sourcePartitionNames, tmpPartitionNames, allPartitionOptimized);
} catch (Exception e) {
LOG.warn("replace partitions failed when insert overwrite into dbId:{}, tableId:{}",
dbId, tableId, e);
throw new AlterCancelException("replace partitions failed " + e);
}
} | class OptimizeJobV2 extends AlterJobV2 implements GsonPostProcessable {
private static final Logger LOG = LogManager.getLogger(OptimizeJobV2.class);
// Watershed transaction id: every transaction started before this id must finish
// before the rewrite tasks may run. -1 means "not assigned yet".
@SerializedName(value = "watershedTxnId")
protected long watershedTxnId = -1;
// Unique suffix ("_<jobId>") appended to temp-partition and task names for this job.
private final String postfix;
// Ids of the temporary partitions created to receive the rewritten data.
@SerializedName(value = "tmpPartitionIds")
private List<Long> tmpPartitionIds = Lists.newArrayList();
// The OPTIMIZE clause being executed. Not serialized: it is null after an FE
// restart, which the run*Job() methods check for explicitly.
private OptimizeClause optimizeClause;
// Cached database name, resolved lazily in runWaitingTxnJob().
private String dbName = "";
// Task properties passed through to TaskBuilder.buildOptimizeTask().
private Map<String, String> properties = Maps.newHashMap();
// One INSERT-rewrite task per (source partition, temp partition) pair.
@SerializedName(value = "rewriteTasks")
private List<OptimizeTask> rewriteTasks = Lists.newArrayList();
// Job progress in percent (0-100), reported via getInfo().
private int progress = 0;
@SerializedName(value = "sourcePartitionNames")
private List<String> sourcePartitionNames = Lists.newArrayList();
@SerializedName(value = "tmpPartitionNames")
private List<String> tmpPartitionNames = Lists.newArrayList();
// True when every partition was optimized; in that case the table's default
// distribution info is replaced as well (see onFinished / onReplayFinished).
@SerializedName(value = "allPartitionOptimized")
private Boolean allPartitionOptimized = false;
@SerializedName(value = "distributionInfo")
private DistributionInfo distributionInfo;
// Human-readable description of the optimize operation, shown in job info.
@SerializedName(value = "optimizeOperation")
private String optimizeOperation = "";
// Creates an optimize job for the given OPTIMIZE clause.
public OptimizeJobV2(long jobId, long dbId, long tableId, String tableName, long timeoutMs,
OptimizeClause optimizeClause) {
this(jobId, dbId, tableId, tableName, timeoutMs);
this.optimizeClause = optimizeClause;
}
// Base constructor: registers the job with the alter framework and derives the
// unique name postfix from the job id. Note optimizeClause stays null here.
public OptimizeJobV2(long jobId, long dbId, long tableId, String tableName, long timeoutMs) {
super(jobId, JobType.OPTIMIZE, dbId, tableId, tableName, timeoutMs);
this.postfix = "_" + jobId;
}
/** Returns the ids of the temporary partitions created for this job (mutable, live list). */
public List<Long> getTmpPartitionIds() {
return tmpPartitionIds;
}
/** Replaces the temp partition id list; the given list is stored as-is, not copied. */
public void setTmpPartitionIds(List<Long> tmpPartitionIds) {
this.tmpPartitionIds = tmpPartitionIds;
}
// Job name used as the rewrite-task name prefix. Since postfix is "_" + jobId,
// the result is "optimize-_<jobId>" — the doubled separator looks unintended
// but is kept because existing task names depend on it.
public String getName() {
return "optimize-" + this.postfix;
}
/** Returns the task properties map forwarded to TaskBuilder (mutable, live map). */
public Map<String, String> getProperties() {
return properties;
}
/** Returns the INSERT-rewrite tasks created by runWaitingTxnJob() (mutable, live list). */
public List<OptimizeTask> getOptimizeTasks() {
return rewriteTasks;
}
/**
 * Looks up {@code tableId} in {@code db} and returns it as an {@link OlapTable}.
 *
 * @throws AlterCancelException if the table no longer exists in the database
 */
private OlapTable checkAndGetTable(Database db, long tableId) throws AlterCancelException {
    Table found = db.getTable(tableId);
    if (found == null) {
        throw new AlterCancelException("table: " + tableId + " does not exist in database: " + db.getFullName());
    }
    // An OPTIMIZE job is only ever attached to an OLAP table; anything else is a programming error.
    Preconditions.checkState(found instanceof OlapTable);
    return (OlapTable) found;
}
/**
 * runPendingJob():
 * 1. Create all temp partitions and wait them finished.
 * 2. Get a new transaction id, then set job's state to WAITING_TXN.
 */
@Override
protected void runPendingJob() throws AlterCancelException {
Preconditions.checkState(jobState == JobState.PENDING, jobState);
LOG.info("begin to send create temp partitions. job: {}", jobId);
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
throw new AlterCancelException("Database " + dbId + " does not exist");
}
// Table not yet stable (e.g. tablets still being balanced): stay in PENDING and retry later.
if (!checkTableStable(db)) {
return;
}
// optimizeClause is not serialized, so it is gone after an FE restart.
if (optimizeClause == null) {
throw new AlterCancelException("optimize clause is null since FE restart, job: " + jobId);
}
// Allocate one temp partition id per source partition.
// NOTE(review): ids are appended before the creation below; if this method were
// re-entered after a transient failure the list could accumulate stale ids — confirm
// the job is cancelled (not retried) on the exception path.
for (int i = 0; i < optimizeClause.getSourcePartitionIds().size(); ++i) {
tmpPartitionIds.add(GlobalStateMgr.getCurrentState().getNextId());
}
long createPartitionStartTimestamp = System.currentTimeMillis();
OlapTable targetTable;
Locker locker = new Locker();
locker.lockDatabase(db, LockType.READ);
try {
targetTable = checkAndGetTable(db, tableId);
} finally {
locker.unLockDatabase(db, LockType.READ);
}
// Create the temp partitions mirroring the source partitions, with the new
// distribution requested by the OPTIMIZE clause.
try {
PartitionUtils.createAndAddTempPartitionsForTable(db, targetTable, postfix,
optimizeClause.getSourcePartitionIds(), getTmpPartitionIds(), optimizeClause.getDistributionDesc());
LOG.debug("create temp partitions {} success. job: {}", getTmpPartitionIds(), jobId);
} catch (Exception e) {
LOG.warn("create temp partitions failed", e);
throw new AlterCancelException("create temp partitions failed " + e);
}
long createPartitionElapse = System.currentTimeMillis() - createPartitionStartTimestamp;
// The watershed: all transactions with a smaller id must finish before rewriting starts.
this.watershedTxnId =
GlobalStateMgr.getCurrentGlobalTransactionMgr().getTransactionIDGenerator().getNextTransactionId();
this.jobState = JobState.WAITING_TXN;
this.optimizeOperation = optimizeClause.toString();
span.setAttribute("createPartitionElapse", createPartitionElapse);
span.setAttribute("watershedTxnId", this.watershedTxnId);
span.addEvent("setWaitingTxn");
// Persist the state transition so it survives restart and is replayed on followers.
GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this);
LOG.info("transfer optimize job {} state to {}, watershed txn_id: {}", jobId, this.jobState, watershedTxnId);
}
/**
 * runWaitingTxnJob():
 * 1. Wait the transactions before the watershedTxnId to be finished.
 * 2. If all previous transactions finished, start inserting data into the temp partitions.
 * 3. Change job state to RUNNING.
 */
@Override
protected void runWaitingTxnJob() throws AlterCancelException {
Preconditions.checkState(jobState == JobState.WAITING_TXN, jobState);
// optimizeClause is not serialized, so it is gone after an FE restart.
if (optimizeClause == null) {
throw new AlterCancelException("optimize clause is null since FE restart, job: " + jobId);
}
// Stay in WAITING_TXN until every pre-watershed transaction has finished.
try {
if (!isPreviousLoadFinished()) {
LOG.info("wait transactions before {} to be finished, optimize job: {}", watershedTxnId, jobId);
return;
}
} catch (AnalysisException e) {
throw new AlterCancelException(e.getMessage());
}
LOG.info("previous transactions are all finished, begin to optimize table. job: {}", jobId);
List<String> tmpPartitionNames;
List<String> partitionNames = Lists.newArrayList();
List<Long> partitionLastVersion = Lists.newArrayList();
List<String> tableCoumnNames = Lists.newArrayList();
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
throw new AlterCancelException("database id: " + dbId + " does not exist");
}
Locker locker = new Locker();
if (!locker.lockAndCheckExist(db, LockType.READ)) {
throw new AlterCancelException("insert overwrite commit failed because locking db: " + dbId + " failed");
}
// Under the DB read lock, snapshot everything needed to build the rewrite SQL:
// temp/source partition names, source partition versions, and column names.
try {
dbName = db.getFullName();
OlapTable targetTable = checkAndGetTable(db, tableId);
// Any missing temp partition means someone dropped it concurrently.
if (getTmpPartitionIds().stream().anyMatch(id -> targetTable.getPartition(id) == null)) {
throw new AlterCancelException("partitions changed during insert");
}
tmpPartitionNames = getTmpPartitionIds().stream()
.map(partitionId -> targetTable.getPartition(partitionId).getName())
.collect(Collectors.toList());
// Record each source partition's name and summed visible version; the version
// is checked later to detect writes that landed during the rewrite.
optimizeClause.getSourcePartitionIds().stream()
.map(partitionId -> targetTable.getPartition(partitionId)).forEach(
partition -> {
partitionNames.add(partition.getName());
partitionLastVersion.add(partition.getSubPartitions().stream()
.mapToLong(PhysicalPartition::getVisibleVersion).sum());
}
);
// Generated columns are recomputed by the INSERT, so they are excluded here.
tableCoumnNames = targetTable.getBaseSchema().stream().filter(column -> !column.isGeneratedColumn())
.map(Column::getName).collect(Collectors.toList());
} finally {
locker.unLockDatabase(db, LockType.READ);
}
// Build one "insert into ... TEMPORARY PARTITION (...) select ..." task per partition.
// The three lists built above are parallel and indexed together.
for (int i = 0; i < tmpPartitionNames.size(); ++i) {
String tmpPartitionName = tmpPartitionNames.get(i);
String partitionName = partitionNames.get(i);
String rewriteSql = "insert into " + tableName + " TEMPORARY PARTITION ("
+ tmpPartitionName + ") select " + Joiner.on(", ").join(tableCoumnNames)
+ " from " + tableName + " partition (" + partitionName + ")";
String taskName = getName() + "_" + tmpPartitionName;
OptimizeTask rewriteTask = TaskBuilder.buildOptimizeTask(taskName, properties, rewriteSql, dbName);
rewriteTask.setPartitionName(partitionName);
rewriteTask.setTempPartitionName(tmpPartitionName);
rewriteTask.setLastVersion(partitionLastVersion.get(i));
rewriteTasks.add(rewriteTask);
}
// Submit every task; a task that fails to even be created is marked FAILED but
// does not abort the job here (runRunningJob() handles per-task failure).
TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager();
for (OptimizeTask rewriteTask : rewriteTasks) {
try {
taskManager.createTask(rewriteTask, false);
taskManager.executeTask(rewriteTask.getName());
LOG.debug("create rewrite task {}", rewriteTask.toString());
} catch (DdlException e) {
rewriteTask.setOptimizeTaskState(Constants.TaskRunState.FAILED);
LOG.warn("create rewrite task failed", e);
}
}
this.jobState = JobState.RUNNING;
span.addEvent("setRunning");
LOG.info("transfer optimize job {} state to {}", jobId, this.jobState);
}
/**
 * runRunningJob():
 * 1. Wait for the INSERT rewrite tasks to finish.
 * 2. Replace the source partitions with the temp partitions (onFinished).
 * 3. Set job's state to FINISHED.
 */
@Override
protected void runRunningJob() throws AlterCancelException {
Preconditions.checkState(jobState == JobState.RUNNING, jobState);
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
// Fixed typo in the error message ("Databasee" -> "Database").
throw new AlterCancelException("Database " + dbId + " does not exist");
}
OlapTable tbl = null;
Locker locker = new Locker();
locker.lockDatabase(db, LockType.READ);
try {
tbl = (OlapTable) db.getTable(tableId);
if (tbl == null) {
throw new AlterCancelException("Table " + tableId + " does not exist");
}
} finally {
locker.unLockDatabase(db, LockType.READ);
}
boolean allFinished = true;
// Renamed from "progress" so the local no longer shadows the field of the same name.
// NOTE: 100 / size() uses integer division, so the accumulated value may top out
// below 100 (e.g. 3 tasks -> 99); the job explicitly sets 99/100 below when done.
int accumulatedProgress = 0;
for (OptimizeTask rewriteTask : rewriteTasks) {
// Tasks already in a terminal state contribute their full share.
if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.FAILED
|| rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.SUCCESS) {
accumulatedProgress += 100 / rewriteTasks.size();
continue;
}
// Still running: add its partial progress and keep waiting.
TaskRun taskRun = GlobalStateMgr.getCurrentState().getTaskManager().getTaskRunManager()
.getRunnableTaskRun(rewriteTask.getId());
if (taskRun != null) {
if (taskRun.getStatus() != null) {
accumulatedProgress += taskRun.getStatus().getProgress() / rewriteTasks.size();
}
allFinished = false;
continue;
}
// Not runnable any more: look it up in the run history to learn its outcome.
TaskRunStatus status = GlobalStateMgr.getCurrentState().getTaskManager()
.getTaskRunManager().getTaskRunHistory().getTaskByName(rewriteTask.getName());
if (status == null) {
allFinished = false;
continue;
}
if (status.getState() == Constants.TaskRunState.FAILED) {
LOG.warn("optimize task {} failed", rewriteTask.getName());
rewriteTask.setOptimizeTaskState(Constants.TaskRunState.FAILED);
}
accumulatedProgress += 100 / rewriteTasks.size();
}
if (!allFinished) {
LOG.debug("wait insert tasks to be finished, optimize job: {}", jobId);
this.progress = accumulatedProgress;
return;
}
this.progress = 99;
LOG.debug("all insert overwrite tasks finished, optimize job: {}", jobId);
// Swap temp partitions in under the DB write lock.
locker.lockDatabase(db, LockType.WRITE);
try {
onFinished(db, tbl);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
this.progress = 100;
this.jobState = JobState.FINISHED;
this.finishedTimeMs = System.currentTimeMillis();
// Persist the completed job so followers replay the same finish.
GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this);
LOG.info("optimize job finished: {}", jobId);
this.span.end();
}
@Override
protected void runFinishedRewritingJob() {
// Intentionally empty: OPTIMIZE jobs complete directly inside runRunningJob(),
// so there is no FINISHED_REWRITING phase work to perform.
}
/**
 * cancelImpl() can be called any time any place.
 * We need to clean any possible residual of this job.
 *
 * @return false if the job is already in a final state, true once cancelled
 */
@Override
protected synchronized boolean cancelImpl(String errMsg) {
// Already FINISHED or CANCELLED: nothing to do.
if (jobState.isFinalState()) {
return false;
}
// Drop temp partitions and restore the table to NORMAL before flipping state.
cancelInternal();
jobState = JobState.CANCELLED;
this.errMsg = errMsg;
this.finishedTimeMs = System.currentTimeMillis();
LOG.info("cancel {} job {}, err: {}", this.type, jobId, errMsg);
// Persist the cancellation so it survives restart and is replayed on followers.
GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this);
span.setStatus(StatusCode.ERROR, errMsg);
span.end();
return true;
}
// Best-effort cleanup on cancel: drop every temp partition created by this job,
// mark their tablets for force deletion, and restore the table to NORMAL.
// Failures are logged, never thrown — cancellation must always proceed.
private void cancelInternal() {
Database db = null;
Locker locker = new Locker();
try {
db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
throw new AlterCancelException("database id:" + dbId + " does not exist");
}
if (!locker.lockAndCheckExist(db, LockType.WRITE)) {
throw new AlterCancelException("insert overwrite commit failed because locking db:" + dbId + " failed");
}
} catch (Exception e) {
// Could not acquire the DB (dropped or lock failed): nothing left to clean.
LOG.warn("get and write lock database failed when cancel job: {}", jobId, e);
return;
}
// From here on the DB write lock is held; released in the finally below.
try {
Table table = db.getTable(tableId);
if (table == null) {
throw new AlterCancelException("table:" + tableId + " does not exist in database:" + db.getFullName());
}
Preconditions.checkState(table instanceof OlapTable);
OlapTable targetTable = (OlapTable) table;
Set<Tablet> sourceTablets = Sets.newHashSet();
if (getTmpPartitionIds() != null) {
for (long pid : getTmpPartitionIds()) {
LOG.info("optimize job {} drop temp partition:{}", jobId, pid);
Partition partition = targetTable.getPartition(pid);
if (partition != null) {
// Collect all tablets (every materialized index state) before dropping,
// so they can be marked for force deletion afterwards.
for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
sourceTablets.addAll(index.getTablets());
}
targetTable.dropTempPartition(partition.getName(), true);
} else {
LOG.warn("partition {} is null", pid);
}
}
}
sourceTablets.forEach(GlobalStateMgr.getCurrentInvertedIndex()::markTabletForceDelete);
targetTable.setState(OlapTableState.NORMAL);
} catch (Exception e) {
LOG.warn("exception when cancel optimize job.", e);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
}
/**
 * Returns true when every transaction on this table that started before
 * {@code watershedTxnId} has finished, i.e. the rewrite may safely begin.
 */
protected boolean isPreviousLoadFinished() throws AnalysisException {
return GlobalStateMgr.getCurrentGlobalTransactionMgr()
.isPreviousTransactionsFinished(watershedTxnId, dbId, Lists.newArrayList(tableId));
}
/**
 * Replay job in PENDING state.
 * Should replay all changes before this job's state transfer to PENDING.
 */
private void replayPending(OptimizeJobV2 replayedJob) {
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
// Database was dropped after the log was written; nothing to restore.
return;
}
Locker locker = new Locker();
locker.lockDatabase(db, LockType.WRITE);
try {
OlapTable tbl = (OlapTable) db.getTable(tableId);
if (tbl == null) {
return;
}
// Re-mark the table as under alteration, as the leader did when starting the job.
tbl.setState(OlapTableState.SCHEMA_CHANGE);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
this.jobState = JobState.PENDING;
this.watershedTxnId = replayedJob.watershedTxnId;
this.optimizeOperation = replayedJob.optimizeOperation;
LOG.info("replay pending optimize job: {}", jobId);
}
/**
 * Replay job in WAITING_TXN state.
 * Should replay all changes in runPendingJob().
 */
private void replayWaitingTxn(OptimizeJobV2 replayedJob) {
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
// Database was dropped after the log was written; nothing to restore.
return;
}
OlapTable tbl = null;
Locker locker = new Locker();
locker.lockDatabase(db, LockType.WRITE);
try {
tbl = (OlapTable) db.getTable(tableId);
if (tbl == null) {
// Table was dropped; nothing to restore.
return;
}
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
// Restore the temp partition ids allocated by runPendingJob().
// (addAll replaces the original element-by-element copy loop; behavior is identical.)
tmpPartitionIds.addAll(replayedJob.getTmpPartitionIds());
this.jobState = JobState.WAITING_TXN;
this.watershedTxnId = replayedJob.watershedTxnId;
this.optimizeOperation = replayedJob.optimizeOperation;
LOG.info("replay waiting txn optimize job: {}", jobId);
}
// Applies the follower-side effects of a finished job: copy the serialized results,
// drop the temp partitions, and restore the table to NORMAL.
// NOTE(review): the partition swap itself is presumably replayed separately via
// ReplacePartitionOperationLog, so dropping the temp partitions here is safe — confirm.
private void onReplayFinished(OptimizeJobV2 replayedJob, OlapTable targetTable) {
this.sourcePartitionNames = replayedJob.sourcePartitionNames;
this.tmpPartitionNames = replayedJob.tmpPartitionNames;
this.allPartitionOptimized = replayedJob.allPartitionOptimized;
this.optimizeOperation = replayedJob.optimizeOperation;
Set<Tablet> sourceTablets = Sets.newHashSet();
for (long id : replayedJob.getTmpPartitionIds()) {
Partition partition = targetTable.getPartition(id);
if (partition != null) {
// Collect all tablets before dropping so they can be marked for force deletion.
for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
sourceTablets.addAll(index.getTablets());
}
targetTable.dropTempPartition(partition.getName(), true);
}
}
sourceTablets.forEach(GlobalStateMgr.getCurrentInvertedIndex()::markTabletForceDelete);
// When every partition was optimized, the table's default distribution changed too.
if (allPartitionOptimized) {
this.distributionInfo = replayedJob.distributionInfo;
LOG.debug("set distribution info to table: {}", distributionInfo);
targetTable.setDefaultDistributionInfo(distributionInfo);
}
targetTable.setState(OlapTableState.NORMAL);
LOG.info("finish replay optimize job {} dbId:{}, tableId:{},"
+ "source partitions:{}, tmp partitions:{}, allOptimized:{}",
jobId, dbId, tableId, sourcePartitionNames, tmpPartitionNames, allPartitionOptimized);
}
/**
 * Replay job in FINISHED state.
 * Should replay all changes in runRunningJob().
 */
private void replayFinished(OptimizeJobV2 replayedJob) {
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
// If the database or table is gone, only the job's own state is updated below.
if (db != null) {
Locker locker = new Locker();
locker.lockDatabase(db, LockType.WRITE);
try {
OlapTable tbl = (OlapTable) db.getTable(tableId);
if (tbl != null) {
onReplayFinished(replayedJob, tbl);
}
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
}
this.jobState = JobState.FINISHED;
this.finishedTimeMs = replayedJob.finishedTimeMs;
LOG.info("replay finished optimize job: {}", jobId);
}
/**
 * Replay job in CANCELLED state.
 */
private void replayCancelled(OptimizeJobV2 replayedJob) {
// Perform the same cleanup the leader did (drop temp partitions, reset table state).
cancelInternal();
this.jobState = JobState.CANCELLED;
this.finishedTimeMs = replayedJob.finishedTimeMs;
this.errMsg = replayedJob.errMsg;
LOG.info("replay cancelled optimize job: {}", jobId);
}
/**
 * Dispatches an edit-log entry to the replay handler matching the logged state.
 * States without side effects to replay (e.g. RUNNING) are ignored.
 */
@Override
public void replay(AlterJobV2 replayedJob) {
    OptimizeJobV2 replayed = (OptimizeJobV2) replayedJob;
    switch (replayedJob.jobState) {
        case PENDING:
            replayPending(replayed);
            break;
        case WAITING_TXN:
            replayWaitingTxn(replayed);
            break;
        case FINISHED:
            replayFinished(replayed);
            break;
        case CANCELLED:
            replayCancelled(replayed);
            break;
        default:
            // No replayable side effects for other states.
            break;
    }
}
/** Appends one display row for this job to {@code infos}, in SHOW ALTER column order. */
@Override
protected void getInfo(List<List<Comparable>> infos) {
    List<Comparable> row = Lists.newArrayList();
    row.add(jobId);
    row.add(tableName);
    row.add(TimeUtils.longToTimeString(createTimeMs));
    row.add(TimeUtils.longToTimeString(finishedTimeMs));
    // optimizeOperation may still be null before runPendingJob() has run.
    String operation = (optimizeOperation == null) ? "" : optimizeOperation;
    row.add(operation);
    row.add(watershedTxnId);
    row.add(jobState.name());
    row.add(errMsg);
    row.add(progress);
    row.add(timeoutMs / 1000);
    infos.add(row);
}
/** Directly overrides the job state; no edit-log entry is written here. */
public void setJobState(JobState jobState) {
this.jobState = jobState;
}
// Serializes this job as a single JSON string via Gson (fields marked @SerializedName).
@Override
public void write(DataOutput out) throws IOException {
String json = GsonUtils.GSON.toJson(this, OptimizeJobV2.class);
Text.writeString(out, json);
}
@Override
public void gsonPostProcess() throws IOException {
// NOTE(review): this hook is currently a no-op — the guarded PENDING branch has
// an empty body. Presumably a placeholder for rebuilding non-serialized state
// after deserialization; confirm the intent before removing.
if (jobState != JobState.PENDING) {
return;
}
}
/**
 * Returns the watershed transaction id, or empty if it has not been assigned yet
 * (watershedTxnId stays at its initial -1 until runPendingJob() completes).
 */
@Override
public Optional<Long> getTransactionId() {
    if (watershedTxnId < 0) {
        return Optional.empty();
    }
    return Optional.of(watershedTxnId);
}
} | class OptimizeJobV2 extends AlterJobV2 implements GsonPostProcessable {
private static final Logger LOG = LogManager.getLogger(OptimizeJobV2.class);
@SerializedName(value = "watershedTxnId")
protected long watershedTxnId = -1;
private final String postfix;
@SerializedName(value = "tmpPartitionIds")
private List<Long> tmpPartitionIds = Lists.newArrayList();
private OptimizeClause optimizeClause;
private String dbName = "";
private Map<String, String> properties = Maps.newHashMap();
@SerializedName(value = "rewriteTasks")
private List<OptimizeTask> rewriteTasks = Lists.newArrayList();
private int progress = 0;
@SerializedName(value = "sourcePartitionNames")
private List<String> sourcePartitionNames = Lists.newArrayList();
@SerializedName(value = "tmpPartitionNames")
private List<String> tmpPartitionNames = Lists.newArrayList();
@SerializedName(value = "allPartitionOptimized")
private Boolean allPartitionOptimized = false;
@SerializedName(value = "distributionInfo")
private DistributionInfo distributionInfo;
@SerializedName(value = "optimizeOperation")
private String optimizeOperation = "";
public OptimizeJobV2(long jobId, long dbId, long tableId, String tableName, long timeoutMs,
OptimizeClause optimizeClause) {
this(jobId, dbId, tableId, tableName, timeoutMs);
this.optimizeClause = optimizeClause;
}
public OptimizeJobV2(long jobId, long dbId, long tableId, String tableName, long timeoutMs) {
super(jobId, JobType.OPTIMIZE, dbId, tableId, tableName, timeoutMs);
this.postfix = "_" + jobId;
}
public List<Long> getTmpPartitionIds() {
return tmpPartitionIds;
}
public void setTmpPartitionIds(List<Long> tmpPartitionIds) {
this.tmpPartitionIds = tmpPartitionIds;
}
public String getName() {
return "optimize-" + this.postfix;
}
public Map<String, String> getProperties() {
return properties;
}
public List<OptimizeTask> getOptimizeTasks() {
return rewriteTasks;
}
private OlapTable checkAndGetTable(Database db, long tableId) throws AlterCancelException {
Table table = db.getTable(tableId);
if (table == null) {
throw new AlterCancelException("table: " + tableId + " does not exist in database: " + db.getFullName());
}
Preconditions.checkState(table instanceof OlapTable);
return (OlapTable) table;
}
/**
* runPendingJob():
* 1. Create all temp partitions and wait them finished.
* 2. Get a new transaction id, then set job's state to WAITING_TXN
*/
@Override
protected void runPendingJob() throws AlterCancelException {
Preconditions.checkState(jobState == JobState.PENDING, jobState);
LOG.info("begin to send create temp partitions. job: {}", jobId);
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
throw new AlterCancelException("Database " + dbId + " does not exist");
}
if (!checkTableStable(db)) {
return;
}
if (optimizeClause == null) {
throw new AlterCancelException("optimize clause is null since FE restart, job: " + jobId);
}
for (int i = 0; i < optimizeClause.getSourcePartitionIds().size(); ++i) {
tmpPartitionIds.add(GlobalStateMgr.getCurrentState().getNextId());
}
long createPartitionStartTimestamp = System.currentTimeMillis();
OlapTable targetTable;
Locker locker = new Locker();
locker.lockDatabase(db, LockType.READ);
try {
targetTable = checkAndGetTable(db, tableId);
} finally {
locker.unLockDatabase(db, LockType.READ);
}
try {
PartitionUtils.createAndAddTempPartitionsForTable(db, targetTable, postfix,
optimizeClause.getSourcePartitionIds(), getTmpPartitionIds(), optimizeClause.getDistributionDesc());
LOG.debug("create temp partitions {} success. job: {}", getTmpPartitionIds(), jobId);
} catch (Exception e) {
LOG.warn("create temp partitions failed", e);
throw new AlterCancelException("create temp partitions failed " + e);
}
long createPartitionElapse = System.currentTimeMillis() - createPartitionStartTimestamp;
this.watershedTxnId =
GlobalStateMgr.getCurrentGlobalTransactionMgr().getTransactionIDGenerator().getNextTransactionId();
this.jobState = JobState.WAITING_TXN;
this.optimizeOperation = optimizeClause.toString();
span.setAttribute("createPartitionElapse", createPartitionElapse);
span.setAttribute("watershedTxnId", this.watershedTxnId);
span.addEvent("setWaitingTxn");
GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this);
LOG.info("transfer optimize job {} state to {}, watershed txn_id: {}", jobId, this.jobState, watershedTxnId);
}
/**
* runWaitingTxnJob():
* 1. Wait the transactions before the watershedTxnId to be finished.
* 2. If all previous transactions finished, start insert into data to temp partitions.
* 3. Change job state to RUNNING.
*/
@Override
protected void runWaitingTxnJob() throws AlterCancelException {
Preconditions.checkState(jobState == JobState.WAITING_TXN, jobState);
if (optimizeClause == null) {
throw new AlterCancelException("optimize clause is null since FE restart, job: " + jobId);
}
try {
if (!isPreviousLoadFinished()) {
LOG.info("wait transactions before {} to be finished, optimize job: {}", watershedTxnId, jobId);
return;
}
} catch (AnalysisException e) {
throw new AlterCancelException(e.getMessage());
}
LOG.info("previous transactions are all finished, begin to optimize table. job: {}", jobId);
List<String> tmpPartitionNames;
List<String> partitionNames = Lists.newArrayList();
List<Long> partitionLastVersion = Lists.newArrayList();
List<String> tableCoumnNames = Lists.newArrayList();
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
throw new AlterCancelException("database id: " + dbId + " does not exist");
}
Locker locker = new Locker();
if (!locker.lockAndCheckExist(db, LockType.READ)) {
throw new AlterCancelException("insert overwrite commit failed because locking db: " + dbId + " failed");
}
try {
dbName = db.getFullName();
OlapTable targetTable = checkAndGetTable(db, tableId);
if (getTmpPartitionIds().stream().anyMatch(id -> targetTable.getPartition(id) == null)) {
throw new AlterCancelException("partitions changed during insert");
}
tmpPartitionNames = getTmpPartitionIds().stream()
.map(partitionId -> targetTable.getPartition(partitionId).getName())
.collect(Collectors.toList());
optimizeClause.getSourcePartitionIds().stream()
.map(partitionId -> targetTable.getPartition(partitionId)).forEach(
partition -> {
partitionNames.add(partition.getName());
partitionLastVersion.add(partition.getSubPartitions().stream()
.mapToLong(PhysicalPartition::getVisibleVersion).sum());
}
);
tableCoumnNames = targetTable.getBaseSchema().stream().filter(column -> !column.isGeneratedColumn())
.map(Column::getName).collect(Collectors.toList());
} finally {
locker.unLockDatabase(db, LockType.READ);
}
for (int i = 0; i < tmpPartitionNames.size(); ++i) {
String tmpPartitionName = tmpPartitionNames.get(i);
String partitionName = partitionNames.get(i);
String rewriteSql = "insert into " + tableName + " TEMPORARY PARTITION ("
+ tmpPartitionName + ") select " + Joiner.on(", ").join(tableCoumnNames)
+ " from " + tableName + " partition (" + partitionName + ")";
String taskName = getName() + "_" + tmpPartitionName;
OptimizeTask rewriteTask = TaskBuilder.buildOptimizeTask(taskName, properties, rewriteSql, dbName);
rewriteTask.setPartitionName(partitionName);
rewriteTask.setTempPartitionName(tmpPartitionName);
rewriteTask.setLastVersion(partitionLastVersion.get(i));
rewriteTasks.add(rewriteTask);
}
TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager();
for (OptimizeTask rewriteTask : rewriteTasks) {
try {
taskManager.createTask(rewriteTask, false);
taskManager.executeTask(rewriteTask.getName());
LOG.debug("create rewrite task {}", rewriteTask.toString());
} catch (DdlException e) {
rewriteTask.setOptimizeTaskState(Constants.TaskRunState.FAILED);
LOG.warn("create rewrite task failed", e);
}
}
this.jobState = JobState.RUNNING;
span.addEvent("setRunning");
LOG.info("transfer optimize job {} state to {}", jobId, this.jobState);
}
/**
* runRunningJob()
* 1. Wait insert into tasks to be finished.
* 2. Replace partitions with temp partitions.
* 3. Set job'state as FINISHED.
*/
@Override
protected void runRunningJob() throws AlterCancelException {
Preconditions.checkState(jobState == JobState.RUNNING, jobState);
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
throw new AlterCancelException("Databasee " + dbId + " does not exist");
}
OlapTable tbl = null;
Locker locker = new Locker();
locker.lockDatabase(db, LockType.READ);
try {
tbl = (OlapTable) db.getTable(tableId);
if (tbl == null) {
throw new AlterCancelException("Table " + tableId + " does not exist");
}
} finally {
locker.unLockDatabase(db, LockType.READ);
}
boolean allFinished = true;
int progress = 0;
for (OptimizeTask rewriteTask : rewriteTasks) {
if (rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.FAILED
|| rewriteTask.getOptimizeTaskState() == Constants.TaskRunState.SUCCESS) {
progress += 100 / rewriteTasks.size();
continue;
}
TaskRun taskRun = GlobalStateMgr.getCurrentState().getTaskManager().getTaskRunManager()
.getRunnableTaskRun(rewriteTask.getId());
if (taskRun != null) {
if (taskRun.getStatus() != null) {
progress += taskRun.getStatus().getProgress() / rewriteTasks.size();
}
allFinished = false;
continue;
}
TaskRunStatus status = GlobalStateMgr.getCurrentState().getTaskManager()
.getTaskRunManager().getTaskRunHistory().getTaskByName(rewriteTask.getName());
if (status == null) {
allFinished = false;
continue;
}
if (status.getState() == Constants.TaskRunState.FAILED) {
LOG.warn("optimize task {} failed", rewriteTask.getName());
rewriteTask.setOptimizeTaskState(Constants.TaskRunState.FAILED);
}
progress += 100 / rewriteTasks.size();
}
if (!allFinished) {
LOG.debug("wait insert tasks to be finished, optimize job: {}", jobId);
this.progress = progress;
return;
}
this.progress = 99;
LOG.debug("all insert overwrite tasks finished, optimize job: {}", jobId);
locker.lockDatabase(db, LockType.WRITE);
try {
onFinished(db, tbl);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
this.progress = 100;
this.jobState = JobState.FINISHED;
this.finishedTimeMs = System.currentTimeMillis();
GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this);
LOG.info("optimize job finished: {}", jobId);
this.span.end();
}
@Override
protected void runFinishedRewritingJob() {
}
/**
* cancelImpl() can be called any time any place.
* We need to clean any possible residual of this job.
*/
@Override
protected synchronized boolean cancelImpl(String errMsg) {
if (jobState.isFinalState()) {
return false;
}
cancelInternal();
jobState = JobState.CANCELLED;
this.errMsg = errMsg;
this.finishedTimeMs = System.currentTimeMillis();
LOG.info("cancel {} job {}, err: {}", this.type, jobId, errMsg);
GlobalStateMgr.getCurrentState().getEditLog().logAlterJob(this);
span.setStatus(StatusCode.ERROR, errMsg);
span.end();
return true;
}
private void cancelInternal() {
Database db = null;
Locker locker = new Locker();
try {
db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
throw new AlterCancelException("database id:" + dbId + " does not exist");
}
if (!locker.lockAndCheckExist(db, LockType.WRITE)) {
throw new AlterCancelException("insert overwrite commit failed because locking db:" + dbId + " failed");
}
} catch (Exception e) {
LOG.warn("get and write lock database failed when cancel job: {}", jobId, e);
return;
}
try {
Table table = db.getTable(tableId);
if (table == null) {
throw new AlterCancelException("table:" + tableId + " does not exist in database:" + db.getFullName());
}
Preconditions.checkState(table instanceof OlapTable);
OlapTable targetTable = (OlapTable) table;
Set<Tablet> sourceTablets = Sets.newHashSet();
if (getTmpPartitionIds() != null) {
for (long pid : getTmpPartitionIds()) {
LOG.info("optimize job {} drop temp partition:{}", jobId, pid);
Partition partition = targetTable.getPartition(pid);
if (partition != null) {
for (MaterializedIndex index : partition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
sourceTablets.addAll(index.getTablets());
}
targetTable.dropTempPartition(partition.getName(), true);
} else {
LOG.warn("partition {} is null", pid);
}
}
}
sourceTablets.forEach(GlobalStateMgr.getCurrentInvertedIndex()::markTabletForceDelete);
targetTable.setState(OlapTableState.NORMAL);
} catch (Exception e) {
LOG.warn("exception when cancel optimize job.", e);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
}
protected boolean isPreviousLoadFinished() throws AnalysisException {
return GlobalStateMgr.getCurrentGlobalTransactionMgr()
.isPreviousTransactionsFinished(watershedTxnId, dbId, Lists.newArrayList(tableId));
}
/**
* Replay job in PENDING state.
* Should replay all changes before this job's state transfer to PENDING.
*/
private void replayPending(OptimizeJobV2 replayedJob) {
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
return;
}
Locker locker = new Locker();
locker.lockDatabase(db, LockType.WRITE);
try {
OlapTable tbl = (OlapTable) db.getTable(tableId);
if (tbl == null) {
return;
}
tbl.setState(OlapTableState.SCHEMA_CHANGE);
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
this.jobState = JobState.PENDING;
this.watershedTxnId = replayedJob.watershedTxnId;
this.optimizeOperation = replayedJob.optimizeOperation;
LOG.info("replay pending optimize job: {}", jobId);
}
/**
* Replay job in WAITING_TXN state.
* Should replay all changes in runPendingJob()
*/
private void replayWaitingTxn(OptimizeJobV2 replayedJob) {
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
return;
}
OlapTable tbl = null;
Locker locker = new Locker();
locker.lockDatabase(db, LockType.WRITE);
try {
tbl = (OlapTable) db.getTable(tableId);
if (tbl == null) {
return;
}
} finally {
locker.unLockDatabase(db, LockType.WRITE);
}
for (long id : replayedJob.getTmpPartitionIds()) {
tmpPartitionIds.add(id);
}
this.jobState = JobState.WAITING_TXN;
this.watershedTxnId = replayedJob.watershedTxnId;
this.optimizeOperation = replayedJob.optimizeOperation;
LOG.info("replay waiting txn optimize job: {}", jobId);
}
/**
 * Applies the FINISHED-state side effects of a replayed job to the target table:
 * copies the replayed bookkeeping fields, drops every temp partition (marking their
 * tablets for forced deletion), optionally installs the new default distribution
 * info, and returns the table to NORMAL state.
 */
private void onReplayFinished(OptimizeJobV2 replayedJob, OlapTable targetTable) {
    this.sourcePartitionNames = replayedJob.sourcePartitionNames;
    this.tmpPartitionNames = replayedJob.tmpPartitionNames;
    this.allPartitionOptimized = replayedJob.allPartitionOptimized;
    this.optimizeOperation = replayedJob.optimizeOperation;
    Set<Tablet> droppedTablets = Sets.newHashSet();
    for (long partitionId : replayedJob.getTmpPartitionIds()) {
        Partition tmpPartition = targetTable.getPartition(partitionId);
        if (tmpPartition == null) {
            continue;
        }
        // Collect the tablets first; dropping the partition detaches them.
        for (MaterializedIndex index : tmpPartition.getMaterializedIndices(MaterializedIndex.IndexExtState.ALL)) {
            droppedTablets.addAll(index.getTablets());
        }
        targetTable.dropTempPartition(tmpPartition.getName(), true);
    }
    droppedTablets.forEach(GlobalStateMgr.getCurrentInvertedIndex()::markTabletForceDelete);
    if (allPartitionOptimized) {
        this.distributionInfo = replayedJob.distributionInfo;
        LOG.debug("set distribution info to table: {}", distributionInfo);
        targetTable.setDefaultDistributionInfo(distributionInfo);
    }
    targetTable.setState(OlapTableState.NORMAL);
    LOG.info("finish replay optimize job {} dbId:{}, tableId:{},"
            + "source partitions:{}, tmp partitions:{}, allOptimized:{}",
            jobId, dbId, tableId, sourcePartitionNames, tmpPartitionNames, allPartitionOptimized);
}
/**
 * Replays a job that is in FINISHED state: re-applies all changes performed by
 * runRuningJob() (via onReplayFinished) when the database and table still exist,
 * then records the terminal state. The terminal state is recorded even when the
 * database or table is gone, matching the original behavior.
 */
private void replayFinished(OptimizeJobV2 replayedJob) {
    Database database = GlobalStateMgr.getCurrentState().getDb(dbId);
    if (database != null) {
        Locker lockHelper = new Locker();
        lockHelper.lockDatabase(database, LockType.WRITE);
        try {
            OlapTable targetTable = (OlapTable) database.getTable(tableId);
            if (targetTable != null) {
                onReplayFinished(replayedJob, targetTable);
            }
        } finally {
            lockHelper.unLockDatabase(database, LockType.WRITE);
        }
    }
    this.jobState = JobState.FINISHED;
    this.finishedTimeMs = replayedJob.finishedTimeMs;
    LOG.info("replay finished optimize job: {}", jobId);
}
/**
 * Replay job in CANCELLED state.
 * Runs the same cleanup as a live cancellation before recording the terminal state.
 */
private void replayCancelled(OptimizeJobV2 replayedJob) {
    // Cleanup must run before the state fields are overwritten below.
    cancelInternal();
    this.jobState = JobState.CANCELLED;
    this.finishedTimeMs = replayedJob.finishedTimeMs;
    this.errMsg = replayedJob.errMsg;
    LOG.info("replay cancelled optimize job: {}", jobId);
}
/**
 * Dispatches an edit-log replay to the handler matching the replayed job's state.
 * States without replay logic (e.g. RUNNING) are intentionally ignored.
 */
@Override
public void replay(AlterJobV2 replayedJob) {
    OptimizeJobV2 replayedOptimizeJob = (OptimizeJobV2) replayedJob;
    JobState state = replayedJob.jobState;
    if (state == JobState.PENDING) {
        replayPending(replayedOptimizeJob);
    } else if (state == JobState.WAITING_TXN) {
        replayWaitingTxn(replayedOptimizeJob);
    } else if (state == JobState.FINISHED) {
        replayFinished(replayedOptimizeJob);
    } else if (state == JobState.CANCELLED) {
        replayCancelled(replayedOptimizeJob);
    }
    // Any other state: nothing to replay.
}
/**
 * Appends one display row for this job to {@code infos}. Column order must stay
 * in sync with the SHOW output schema consumed by callers.
 */
@Override
protected void getInfo(List<List<Comparable>> infos) {
    List<Comparable> row = Lists.newArrayList(
            jobId,
            tableName,
            TimeUtils.longToTimeString(createTimeMs),
            TimeUtils.longToTimeString(finishedTimeMs),
            optimizeOperation != null ? optimizeOperation : "",
            watershedTxnId,
            jobState.name(),
            errMsg,
            progress,
            timeoutMs / 1000); // timeout is shown in seconds
    infos.add(row);
}
/** Overrides the current job state with the given state. */
public void setJobState(JobState jobState) {
    this.jobState = jobState;
}
/** Serializes this job as a GSON JSON string to the given output. */
@Override
public void write(DataOutput out) throws IOException {
    Text.writeString(out, GsonUtils.GSON.toJson(this, OptimizeJobV2.class));
}
@Override
public void gsonPostProcess() throws IOException {
    // NOTE(review): this deserialization hook is currently a no-op — the guard
    // returns early for non-PENDING jobs, but no post-processing follows for
    // PENDING jobs either. Confirm whether fix-up logic is missing here.
    if (jobState != JobState.PENDING) {
        return;
    }
}
/**
 * Returns the watershed transaction id, or empty when none has been created yet
 * (a negative id marks the not-yet-created case).
 */
@Override
public Optional<Long> getTransactionId() {
    return Optional.of(watershedTxnId).filter(txnId -> txnId >= 0);
}
} |
Huh? I clearly remember deleting those lines of code >_< | public void deactivate(Application application, ZoneId zone) {
Optional<Deployment> deployment = Optional.empty();
if (false && deployment.isPresent()
&& ! DeploymentExpirer.hasExpired(controller.zoneRegistry(), deployment.get(), clock.instant()))
return;
lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone)));
} | if (false && deployment.isPresent() | public void deactivate(Application application, ZoneId zone) {
lockOrThrow(application.id(), lockedApplication -> store(deactivate(lockedApplication, zone)));
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final RotationRepository rotationRepository;
private final AthenzClientFactory zmsClientFactory;
private final NameService nameService;
private final ConfigServer configServer;
private final RoutingGenerator routingGenerator;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
ApplicationController(Controller controller, CuratorDb curator,
AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
NameService nameService, ConfigServer configServer,
ArtifactRepository artifactRepository,
RoutingGenerator routingGenerator, BuildService buildService, Clock clock) {
this.controller = controller;
this.curator = curator;
this.zmsClientFactory = zmsClientFactory;
this.nameService = nameService;
this.configServer = configServer;
this.routingGenerator = routingGenerator;
this.clock = clock;
this.artifactRepository = artifactRepository;
this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
this.deploymentTrigger = new DeploymentTrigger(controller, curator, buildService, clock);
for (Application application : curator.readApplications()) {
lockIfPresent(application.id(), this::store);
}
}
/** Returns the application with the given id, or null if it is not present */
public Optional<Application> get(ApplicationId id) {
return curator.readApplication(id);
}
/**
* Returns the application with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Application require(ApplicationId id) {
return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications */
public List<Application> asList() {
return sort(curator.readApplications());
}
/** Returns all applications of a tenant */
public List<Application> asList(TenantName tenant) {
return sort(curator.readApplications(tenant));
}
/**
* Set the rotations marked as 'global' either 'in' or 'out of' service.
*
* @return The canonical endpoint altered if any
* @throws IOException if rotation status cannot be updated
*/
public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
List<String> rotations = new ArrayList<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
if (endpoint.isPresent()) {
configServer.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
rotations.add(endpoint.get());
}
return rotations;
}
/**
* Get the endpoint status for the global endpoint of this application
*
* @return Map between the endpoint and the rotation status
* @throws IOException if global rotation status cannot be determined
*/
public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
Map<String, EndpointStatus> result = new HashMap<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
if (endpoint.isPresent()) {
EndpointStatus status = configServer.getGlobalRotationStatus(deploymentId, endpoint.get());
result.put(endpoint.get(), status);
}
return result;
}
/**
* Global rotations (plural as we can have aliases) map to exactly one service endpoint.
* This method finds that one service endpoint and strips the URI part that
* the routingGenerator is wrapping around the endpoint.
*
* @param deploymentId The deployment to retrieve global service endpoint for
* @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
*/
Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
try {
URI uri = new URI(endpoint.getEndpoint());
String serviceEndpoint = uri.getHost();
if (serviceEndpoint == null) {
throw new IOException("Unexpected endpoints returned from the Routing Generator");
}
String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", "");
String hostname = endpoint.getHostname();
if (hostname != null) {
if (endpoint.isGlobal()) {
hostToGlobalEndpoint.put(hostname, endpoint);
} else {
hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
}
if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
return Optional.of(hostToCanonicalEndpoint.get(hostname));
}
}
} catch (URISyntaxException use) {
throw new IOException(use);
}
}
return Optional.empty();
}
/**
* Creates a new application for an existing tenant.
*
* @throws IllegalArgumentException if the application already exists
*/
public Application createApplication(ApplicationId id, Optional<NToken> token) {
if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
try (Lock lock = lock(id)) {
if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
Optional<Tenant> tenant = controller.tenants().tenant(id.tenant());
if ( ! tenant.isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
if (get(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
if (get(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
if (id.instance().isDefault() && tenant.get() instanceof AthenzTenant) {
if ( ! token.isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
zmsClient.addApplication(((AthenzTenant) tenant.get()).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
}
LockedApplication application = new LockedApplication(new Application(id), lock);
store(application);
log.info("Created " + application);
return application;
}
}
/** Deploys an application. If the application does not exist it is created. */
public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
DeployOptions options) {
try (Lock lock = lock(applicationId)) {
LockedApplication application = get(applicationId)
.map(app -> new LockedApplication(app, lock))
.orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock));
boolean canDeployDirectly = ! options.screwdriverBuildJob.map(job1 -> job1.screwdriverId).isPresent()
|| zone.environment().isManuallyDeployed();
boolean preferOldestVersion = options.deployCurrentVersion;
Version platformVersion;
ApplicationVersion applicationVersion;
ApplicationPackage applicationPackage;
if (canDeployDirectly) {
platformVersion = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
applicationVersion = ApplicationVersion.unknown;
applicationPackage = applicationPackageFromDeployer.orElseThrow(
() -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
} else {
JobType jobType = JobType.from(controller.system(), zone)
.orElseThrow(() -> new IllegalArgumentException("No job found for zone " + zone));
Optional<JobRun> triggered = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
.flatMap(JobStatus::lastTriggered);
if ( ! triggered.isPresent())
return unexpectedDeployment(applicationId, zone);
platformVersion = preferOldestVersion
? triggered.get().sourcePlatform().orElse(triggered.get().platform())
: triggered.get().platform();
applicationVersion = preferOldestVersion
? triggered.get().sourceApplication().orElse(triggered.get().application())
: triggered.get().application();
applicationPackage = new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), applicationVersion.id()));
validateRun(application, zone, platformVersion, applicationVersion);
}
validate(applicationPackage.deploymentSpec());
if ( ! preferOldestVersion) {
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
application = deleteRemovedDeployments(application);
application = deleteUnreferencedDeploymentJobs(application);
store(application);
}
application = withRotation(application, zone);
Set<String> rotationNames = new HashSet<>();
Set<String> cnames = new HashSet<>();
application.rotation().ifPresent(applicationRotation -> {
rotationNames.add(applicationRotation.id().asString());
cnames.add(applicationRotation.dnsName());
cnames.add(applicationRotation.secureDnsName());
});
options = withVersion(platformVersion, options);
DeploymentId deploymentId = new DeploymentId(applicationId, zone);
ConfigServer.PreparedApplication preparedApplication =
configServer.deploy(deploymentId, options, cnames, rotationNames, applicationPackage.zippedContent());
application = application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant());
store(application);
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
}
}
/** Makes sure the application has a global rotation, if eligible. */
private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) {
try (RotationLock rotationLock = rotationRepository.lock()) {
Rotation rotation = rotationRepository.getOrAssignRotation(application, rotationLock);
application = application.with(rotation.id());
store(application);
registerRotationInDns(rotation, application.rotation().get().dnsName());
registerRotationInDns(rotation, application.rotation().get().secureDnsName());
}
}
return application;
}
private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone) {
Log logEntry = new Log();
logEntry.level = "WARNING";
logEntry.time = clock.instant().toEpochMilli();
logEntry.message = "Ignoring deployment of " + require(applicationId) + " to " + zone +
" as a deployment is not currently expected";
PrepareResponse prepareResponse = new PrepareResponse();
prepareResponse.log = Collections.singletonList(logEntry);
prepareResponse.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
return new ActivateResult(new RevisionId("0"), prepareResponse, 0);
}
private LockedApplication deleteRemovedDeployments(LockedApplication application) {
List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
.filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
Optional.of(deployment.zone().region())))
.collect(Collectors.toList());
if (deploymentsToRemove.isEmpty()) return application;
if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
" is deployed in " +
deploymentsToRemove.stream()
.map(deployment -> deployment.zone().region().value())
.collect(Collectors.joining(", ")) +
", but does not include " +
(deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
" in deployment.xml");
LockedApplication applicationWithRemoval = application;
for (Deployment deployment : deploymentsToRemove)
applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
return applicationWithRemoval;
}
private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
for (JobType job : application.deploymentJobs().jobStatus().keySet()) {
Optional<ZoneId> zone = job.zone(controller.system());
if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region))))
continue;
application = application.withoutDeploymentJob(job);
}
return application;
}
private DeployOptions withVersion(Version version, DeployOptions options) {
return new DeployOptions(options.screwdriverBuildJob,
Optional.of(version),
options.ignoreValidationErrors,
options.deployCurrentVersion);
}
/** Register a DNS name for rotation */
private void registerRotationInDns(Rotation rotation, String dnsName) {
try {
Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName));
RecordData rotationName = RecordData.fqdn(rotation.name());
if (record.isPresent()) {
if ( ! record.get().data().equals(rotationName)) {
nameService.updateRecord(record.get().id(), rotationName);
log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName
+ "' -> '" + rotation.name() + "'");
}
} else {
RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '"
+ rotation.name() + "'");
}
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to register CNAME", e);
}
}
/** Returns the endpoints of the deployment, or an empty list if the request fails */
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
try {
return Optional.of(ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
.map(RoutingEndpoint::getEndpoint)
.map(URI::create)
.iterator()));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
+ Exceptions.toMessageString(e));
return Optional.empty();
}
}
/**
* Deletes the the given application. All known instances of the applications will be deleted,
* including PR instances.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
* @throws NotExistsException if no instances of the application exist
*/
public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) {
List<ApplicationId> instances = controller.applications().asList(applicationId.tenant())
.stream()
.map(Application::id)
.filter(id -> id.application().equals(applicationId.application()) &&
id.tenant().equals(applicationId.tenant()))
.collect(Collectors.toList());
if (instances.isEmpty()) {
throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
}
instances.forEach(id -> lockOrThrow(id, application -> {
if ( ! application.deployments().isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
Tenant tenant = controller.tenants().tenant(id.tenant()).get();
if (tenant instanceof AthenzTenant && ! token.isPresent())
throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");
if (id.instance().isDefault() && tenant instanceof AthenzTenant) {
zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
.deleteApplication(((AthenzTenant) tenant).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
}
curator.removeApplication(id);
log.info("Deleted " + application);
}));
}
/**
* Replace any previous version of this application by this instance
*
* @param application a locked application to store
*/
public void store(LockedApplication application) {
curator.writeApplication(application);
}
/**
* Acquire a locked application to modify and store, if there is an application with the given id.
*
* @param applicationId ID of the application to lock and get.
* @param action Function which acts on the locked application.
*/
public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
get(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
}
}
/**
* Acquire a locked application to modify and store, or throw an exception if no application has the given id.
*
* @param applicationId ID of the application to lock and require.
* @param action Function which acts on the locked application.
* @throws IllegalArgumentException when application does not exist.
*/
public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
try (Lock lock = lock(applicationId)) {
action.accept(new LockedApplication(require(applicationId), lock));
}
}
/**
* Tells config server to schedule a restart of all nodes in this deployment
*
* @param hostname If non-empty, restart will only be scheduled for this host
*/
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
try {
configServer.restart(deploymentId, hostname);
}
catch (NoInstanceException e) {
throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment");
}
}
/** Deactivate application in the given zone */
/**
* Deactivates a locked application without storing it
*
* @return the application with the deployment in the given zone removed
*/
private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
try {
configServer.deactivate(new DeploymentId(application.id(), zone));
}
catch (NoInstanceException ignored) {
}
return application.withoutDeploymentIn(zone);
}
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
private ApplicationId dashToUnderscore(ApplicationId id) {
return ApplicationId.from(id.tenant().value(),
id.application().value().replaceAll("-", "_"),
id.instance().value());
}
public ConfigServer configServer() { return configServer; }
/**
* Returns a lock which provides exclusive rights to changing this application.
* Any operation which stores an application need to first acquire this lock, then read, modify
* and store the application, and finally release (close) the lock.
*/
Lock lock(ApplicationId application) {
return curator.lock(application, Duration.ofMinutes(10));
}
/** Verify that each of the production zones listed in the deployment spec exist in this system. */
private void validate(DeploymentSpec deploymentSpec) {
deploymentSpec.zones().stream()
.filter(zone -> zone.environment() == Environment.prod)
.forEach(zone -> {
if ( ! controller.zoneRegistry().hasZone(ZoneId.from(zone.environment(),
zone.region().orElse(null)))) {
throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
}
});
}
/** Verify that we don't downgrade an existing production deployment. */
private void validateRun(Application application, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
Deployment deployment = application.deployments().get(zone);
if ( zone.environment().isProduction() && deployment != null
&& ( platformVersion.compareTo(deployment.version()) < 0
|| applicationVersion.compareTo(deployment.applicationVersion()) < 0))
throw new IllegalArgumentException(String.format("Rejecting deployment of %s to %s, as the requested versions (platform: %s, application: %s)" +
" are older than the currently deployed (platform: %s, application: %s).",
application, zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
}
public RotationRepository rotationRepository() {
return rotationRepository;
}
/** Sort given list of applications by application ID */
private static List<Application> sort(List<Application> applications) {
return applications.stream().sorted(Comparator.comparing(Application::id)).collect(Collectors.toList());
}
} | class ApplicationController {
private static final Logger log = Logger.getLogger(ApplicationController.class.getName());
/** The controller owning this */
private final Controller controller;
/** For persistence */
private final CuratorDb curator;
private final ArtifactRepository artifactRepository;
private final RotationRepository rotationRepository;
private final AthenzClientFactory zmsClientFactory;
private final NameService nameService;
private final ConfigServer configServer;
private final RoutingGenerator routingGenerator;
private final Clock clock;
private final DeploymentTrigger deploymentTrigger;
ApplicationController(Controller controller, CuratorDb curator,
AthenzClientFactory zmsClientFactory, RotationsConfig rotationsConfig,
NameService nameService, ConfigServer configServer,
ArtifactRepository artifactRepository,
RoutingGenerator routingGenerator, BuildService buildService, Clock clock) {
this.controller = controller;
this.curator = curator;
this.zmsClientFactory = zmsClientFactory;
this.nameService = nameService;
this.configServer = configServer;
this.routingGenerator = routingGenerator;
this.clock = clock;
this.artifactRepository = artifactRepository;
this.rotationRepository = new RotationRepository(rotationsConfig, this, curator);
this.deploymentTrigger = new DeploymentTrigger(controller, curator, buildService, clock);
for (Application application : curator.readApplications()) {
lockIfPresent(application.id(), this::store);
}
}
/** Returns the application with the given id, or null if it is not present */
public Optional<Application> get(ApplicationId id) {
return curator.readApplication(id);
}
/**
* Returns the application with the given id
*
* @throws IllegalArgumentException if it does not exist
*/
public Application require(ApplicationId id) {
return get(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
}
/** Returns a snapshot of all applications */
public List<Application> asList() {
return sort(curator.readApplications());
}
/** Returns all applications of a tenant */
public List<Application> asList(TenantName tenant) {
return sort(curator.readApplications(tenant));
}
/**
* Set the rotations marked as 'global' either 'in' or 'out of' service.
*
* @return The canonical endpoint altered if any
* @throws IOException if rotation status cannot be updated
*/
public List<String> setGlobalRotationStatus(DeploymentId deploymentId, EndpointStatus status) throws IOException {
List<String> rotations = new ArrayList<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
if (endpoint.isPresent()) {
configServer.setGlobalRotationStatus(deploymentId, endpoint.get(), status);
rotations.add(endpoint.get());
}
return rotations;
}
/**
* Get the endpoint status for the global endpoint of this application
*
* @return Map between the endpoint and the rotation status
* @throws IOException if global rotation status cannot be determined
*/
public Map<String, EndpointStatus> getGlobalRotationStatus(DeploymentId deploymentId) throws IOException {
Map<String, EndpointStatus> result = new HashMap<>();
Optional<String> endpoint = getCanonicalGlobalEndpoint(deploymentId);
if (endpoint.isPresent()) {
EndpointStatus status = configServer.getGlobalRotationStatus(deploymentId, endpoint.get());
result.put(endpoint.get(), status);
}
return result;
}
/**
* Global rotations (plural as we can have aliases) map to exactly one service endpoint.
* This method finds that one service endpoint and strips the URI part that
* the routingGenerator is wrapping around the endpoint.
*
* @param deploymentId The deployment to retrieve global service endpoint for
* @return Empty if no global endpoint exist, otherwise the service endpoint ([clustername.]app.tenant.region.env)
*/
Optional<String> getCanonicalGlobalEndpoint(DeploymentId deploymentId) throws IOException {
Map<String, RoutingEndpoint> hostToGlobalEndpoint = new HashMap<>();
Map<String, String> hostToCanonicalEndpoint = new HashMap<>();
for (RoutingEndpoint endpoint : routingGenerator.endpoints(deploymentId)) {
try {
URI uri = new URI(endpoint.getEndpoint());
String serviceEndpoint = uri.getHost();
if (serviceEndpoint == null) {
throw new IOException("Unexpected endpoints returned from the Routing Generator");
}
String canonicalEndpoint = serviceEndpoint.replaceAll(".vespa.yahooapis.com", "");
String hostname = endpoint.getHostname();
if (hostname != null) {
if (endpoint.isGlobal()) {
hostToGlobalEndpoint.put(hostname, endpoint);
} else {
hostToCanonicalEndpoint.put(hostname, canonicalEndpoint);
}
if (hostToGlobalEndpoint.containsKey(hostname) && hostToCanonicalEndpoint.containsKey(hostname)) {
return Optional.of(hostToCanonicalEndpoint.get(hostname));
}
}
} catch (URISyntaxException use) {
throw new IOException(use);
}
}
return Optional.empty();
}
/**
* Creates a new application for an existing tenant.
*
* @throws IllegalArgumentException if the application already exists
*/
public Application createApplication(ApplicationId id, Optional<NToken> token) {
if ( ! (id.instance().isDefault() || id.instance().value().matches("\\d+")))
throw new UnsupportedOperationException("Only the instance names 'default' and names which are just the PR number are supported at the moment");
try (Lock lock = lock(id)) {
if (asList(id.tenant()).stream().noneMatch(application -> application.id().application().equals(id.application())))
com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());
Optional<Tenant> tenant = controller.tenants().tenant(id.tenant());
if ( ! tenant.isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
if (get(id).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
if (get(dashToUnderscore(id)).isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");
if (id.instance().isDefault() && tenant.get() instanceof AthenzTenant) {
if ( ! token.isPresent())
throw new IllegalArgumentException("Could not create '" + id + "': No NToken provided");
ZmsClient zmsClient = zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get());
zmsClient.addApplication(((AthenzTenant) tenant.get()).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
}
LockedApplication application = new LockedApplication(new Application(id), lock);
store(application);
log.info("Created " + application);
return application;
}
}
/**
 * Deploys an application. If the application does not exist it is created.
 *
 * @param applicationId the application to deploy
 * @param zone the zone to deploy to
 * @param applicationPackageFromDeployer package supplied directly by the deployer; required for direct deployments
 * @param options deployment options; the Vespa version herein is overridden by the resolved platform version
 * @return the result of activating the prepared application
 */
public ActivateResult deployApplication(ApplicationId applicationId, ZoneId zone,
Optional<ApplicationPackage> applicationPackageFromDeployer,
DeployOptions options) {
try (Lock lock = lock(applicationId)) {
LockedApplication application = get(applicationId)
.map(app -> new LockedApplication(app, lock))
.orElseGet(() -> new LockedApplication(createApplication(applicationId, Optional.empty()), lock));
// Direct deployment: no screwdriver build job attached, or a manually deployed environment (dev/perf).
boolean canDeployDirectly = ! options.screwdriverBuildJob.map(job1 -> job1.screwdriverId).isPresent()
|| zone.environment().isManuallyDeployed();
boolean preferOldestVersion = options.deployCurrentVersion;
Version platformVersion;
ApplicationVersion applicationVersion;
ApplicationPackage applicationPackage;
if (canDeployDirectly) {
platformVersion = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
applicationVersion = ApplicationVersion.unknown;
applicationPackage = applicationPackageFromDeployer.orElseThrow(
() -> new IllegalArgumentException("Application package must be given when deploying to " + zone));
} else {
// Pipeline deployment: versions come from the job run that triggered this deployment,
// and the package is fetched from the artifact repository.
JobType jobType = JobType.from(controller.system(), zone)
.orElseThrow(() -> new IllegalArgumentException("No job found for zone " + zone));
Optional<JobRun> triggered = Optional.ofNullable(application.deploymentJobs().jobStatus().get(jobType))
.flatMap(JobStatus::lastTriggered);
if ( ! triggered.isPresent())
return unexpectedDeployment(applicationId, zone);
platformVersion = preferOldestVersion
? triggered.get().sourcePlatform().orElse(triggered.get().platform())
: triggered.get().platform();
applicationVersion = preferOldestVersion
? triggered.get().sourceApplication().orElse(triggered.get().application())
: triggered.get().application();
applicationPackage = new ApplicationPackage(artifactRepository.getApplicationPackage(application.id(), applicationVersion.id()));
validateRun(application, zone, platformVersion, applicationVersion);
}
validate(applicationPackage.deploymentSpec());
// Update stored spec/overrides unless this is a redeploy of the current version.
if ( ! preferOldestVersion) {
application = application.with(applicationPackage.deploymentSpec());
application = application.with(applicationPackage.validationOverrides());
application = deleteRemovedDeployments(application);
application = deleteUnreferencedDeploymentJobs(application);
store(application);
}
application = withRotation(application, zone);
Set<String> rotationNames = new HashSet<>();
Set<String> cnames = new HashSet<>();
application.rotation().ifPresent(applicationRotation -> {
rotationNames.add(applicationRotation.id().asString());
cnames.add(applicationRotation.dnsName());
cnames.add(applicationRotation.secureDnsName());
});
options = withVersion(platformVersion, options);
DeploymentId deploymentId = new DeploymentId(applicationId, zone);
ConfigServer.PreparedApplication preparedApplication =
configServer.deploy(deploymentId, options, cnames, rotationNames, applicationPackage.zippedContent());
application = application.withNewDeployment(zone, applicationVersion, platformVersion, clock.instant());
store(application);
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
}
}
/**
 * Makes sure the application has a global rotation, if eligible: prod deployments of applications
 * declaring a global service id. Assigning a rotation also registers both its DNS names as CNAMEs
 * and stores the updated application.
 *
 * @return the application, updated with a rotation when one was assigned
 */
private LockedApplication withRotation(LockedApplication application, ZoneId zone) {
if (zone.environment() == Environment.prod && application.deploymentSpec().globalServiceId().isPresent()) {
try (RotationLock rotationLock = rotationRepository.lock()) {
// getOrAssignRotation is idempotent: an already-assigned rotation is reused.
Rotation rotation = rotationRepository.getOrAssignRotation(application, rotationLock);
application = application.with(rotation.id());
store(application);
registerRotationInDns(rotation, application.rotation().get().dnsName());
registerRotationInDns(rotation, application.rotation().get().secureDnsName());
}
}
return application;
}
/**
 * Builds the no-op result returned when a pipeline deployment arrives for a job
 * we have never triggered: nothing is activated, and a warning log entry explains why.
 */
private ActivateResult unexpectedDeployment(ApplicationId applicationId, ZoneId zone) {
    Log warning = new Log();
    warning.level = "WARNING";
    warning.time = clock.instant().toEpochMilli();
    warning.message = "Ignoring deployment of " + require(applicationId) + " to " + zone +
                      " as a deployment is not currently expected";
    PrepareResponse response = new PrepareResponse();
    response.log = Collections.singletonList(warning);
    response.configChangeActions = new ConfigChangeActions(Collections.emptyList(), Collections.emptyList());
    return new ActivateResult(new RevisionId("0"), response, 0);
}
/**
 * Deactivates production deployments in zones no longer present in the deployment spec.
 * Removal must be explicitly allowed by a 'deployment-removal' validation override.
 *
 * @return the application with the removed deployments deactivated
 * @throws IllegalArgumentException if deployments would be removed without a valid override
 */
private LockedApplication deleteRemovedDeployments(LockedApplication application) {
List<Deployment> deploymentsToRemove = application.productionDeployments().values().stream()
.filter(deployment -> ! application.deploymentSpec().includes(deployment.zone().environment(),
Optional.of(deployment.zone().region())))
.collect(Collectors.toList());
if (deploymentsToRemove.isEmpty()) return application;
if ( ! application.validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application +
" is deployed in " +
deploymentsToRemove.stream()
.map(deployment -> deployment.zone().region().value())
.collect(Collectors.joining(", ")) +
", but does not include " +
(deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
" in deployment.xml");
// Each deactivate returns a new (immutable) application instance; accumulate them.
LockedApplication applicationWithRemoval = application;
for (Deployment deployment : deploymentsToRemove)
applicationWithRemoval = deactivate(applicationWithRemoval, deployment.zone());
return applicationWithRemoval;
}
/**
 * Removes status for production jobs whose zone is no longer in the deployment spec.
 * Non-production jobs are always kept.
 *
 * @return the application with unreferenced production jobs removed
 */
private LockedApplication deleteUnreferencedDeploymentJobs(LockedApplication application) {
// Iteration is over the key set of the ORIGINAL application snapshot; withoutDeploymentJob
// appears to return a modified copy (note the reassignment), so this does not
// modify the collection being iterated.
for (JobType job : application.deploymentJobs().jobStatus().keySet()) {
Optional<ZoneId> zone = job.zone(controller.system());
if ( ! job.isProduction() || (zone.isPresent() && application.deploymentSpec().includes(zone.get().environment(), zone.map(ZoneId::region))))
continue;
application = application.withoutDeploymentJob(job);
}
return application;
}
/** Returns a copy of the given options with only the Vespa version replaced. */
private DeployOptions withVersion(Version version, DeployOptions options) {
    Optional<Version> vespaVersion = Optional.of(version);
    return new DeployOptions(options.screwdriverBuildJob,
                             vespaVersion,
                             options.ignoreValidationErrors,
                             options.deployCurrentVersion);
}
/**
 * Registers (or updates) a CNAME from the given DNS name to the rotation's name.
 * Best-effort: any runtime failure is logged as a warning and swallowed, so a DNS
 * outage does not fail the deployment.
 */
private void registerRotationInDns(Rotation rotation, String dnsName) {
try {
Optional<Record> record = nameService.findRecord(Record.Type.CNAME, RecordName.from(dnsName));
RecordData rotationName = RecordData.fqdn(rotation.name());
if (record.isPresent()) {
// Only touch the record when it points somewhere else.
if ( ! record.get().data().equals(rotationName)) {
nameService.updateRecord(record.get().id(), rotationName);
log.info("Updated mapping for record ID " + record.get().id().asString() + ": '" + dnsName
+ "' -> '" + rotation.name() + "'");
}
} else {
RecordId id = nameService.createCname(RecordName.from(dnsName), rotationName);
log.info("Registered mapping with record ID " + id.asString() + ": '" + dnsName + "' -> '"
+ rotation.name() + "'");
}
} catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to register CNAME", e);
}
}
/**
 * Returns the endpoints of the given deployment, or an empty Optional if the
 * routing layer request fails (the failure is logged as a warning).
 */
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
try {
return Optional.of(ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
.map(RoutingEndpoint::getEndpoint)
.map(URI::create)
.iterator()));
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to get endpoint information for " + deploymentId + ": "
+ Exceptions.toMessageString(e));
return Optional.empty();
}
}
/**
 * Deletes the the given application. All known instances of the applications will be deleted,
 * including PR instances.
 *
 * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
 * @throws NotExistsException if no instances of the application exist
 */
public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) {
// Collect every instance (default + PR instances) of this tenant/application pair.
List<ApplicationId> instances = controller.applications().asList(applicationId.tenant())
.stream()
.map(Application::id)
.filter(id -> id.application().equals(applicationId.application()) &&
id.tenant().equals(applicationId.tenant()))
.collect(Collectors.toList())
if (instances.isEmpty()) {
throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
}
instances.forEach(id -> lockOrThrow(id, application -> {
if ( ! application.deployments().isEmpty())
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments");
// NOTE(review): assumes the tenant still exists since an instance does — .get() would
// throw NoSuchElementException otherwise; confirm this invariant holds.
Tenant tenant = controller.tenants().tenant(id.tenant()).get();
if (tenant instanceof AthenzTenant && ! token.isPresent())
throw new IllegalArgumentException("Could not delete '" + application + "': No NToken provided");
// token.get() below is safe: guarded by the Athenz-tenant check just above.
if (id.instance().isDefault() && tenant instanceof AthenzTenant) {
zmsClientFactory.createZmsClientWithAuthorizedServiceToken(token.get())
.deleteApplication(((AthenzTenant) tenant).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
}
curator.removeApplication(id);
log.info("Deleted " + application);
}));
}
/**
 * Replaces any stored version of this application with the given instance.
 * Callers must hold the application lock, which the LockedApplication type guarantees.
 *
 * @param application a locked application to store
 */
public void store(LockedApplication application) {
curator.writeApplication(application);
}
/**
 * Acquires the lock for the given application id and, if the application exists,
 * invokes the given action on it while the lock is held. A missing application is a no-op.
 *
 * @param applicationId ID of the application to lock and get.
 * @param action Function which acts on the locked application.
 */
public void lockIfPresent(ApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        get(applicationId).ifPresent(application -> action.accept(new LockedApplication(application, lock)));
    }
}
/**
 * Acquires the lock for the given application id and invokes the given action on the
 * application while the lock is held.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
 */
public void lockOrThrow(ApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        LockedApplication application = new LockedApplication(require(applicationId), lock);
        action.accept(application);
    }
}
/**
 * Tells config server to schedule a restart of all nodes in this deployment
 *
 * @param hostname If non-empty, restart will only be scheduled for this host
 * @throws IllegalArgumentException if the deployment does not exist
 */
public void restart(DeploymentId deploymentId, Optional<Hostname> hostname) {
    try {
        configServer.restart(deploymentId, hostname);
    }
    catch (NoInstanceException e) {
        // Keep the original exception as cause instead of discarding it.
        throw new IllegalArgumentException("Could not restart " + deploymentId + ": No such deployment", e);
    }
}
/**
 * Deactivates the deployment of a locked application in the given zone, without storing the change.
 * A deployment unknown to the config server is treated as already deactivated.
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, ZoneId zone) {
try {
configServer.deactivate(new DeploymentId(application.id(), zone));
}
catch (NoInstanceException ignored) {
// Already gone on the config server; only our own bookkeeping remains.
}
return application.withoutDeploymentIn(zone);
}
/** Returns the deployment trigger responsible for scheduling deployment jobs. */
public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }
/**
 * Returns an application id equal to the given one, but with every dash in the
 * application name replaced by an underscore. Used to detect name collisions between
 * the two spellings, which some external systems conflate.
 */
private ApplicationId dashToUnderscore(ApplicationId id) {
    return ApplicationId.from(id.tenant().value(),
                              // replace(char, char): literal replacement, no regex machinery needed
                              id.application().value().replace('-', '_'),
                              id.instance().value());
}
/** Returns the config server client used by this controller. */
public ConfigServer configServer() { return configServer; }
/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application need to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
 * Acquisition waits up to 10 minutes before giving up.
 */
Lock lock(ApplicationId application) {
return curator.lock(application, Duration.ofMinutes(10));
}
/**
 * Verifies that every production zone listed in the deployment spec exists in this system.
 *
 * @throws IllegalArgumentException if a declared production zone is unknown here
 */
private void validate(DeploymentSpec deploymentSpec) {
    deploymentSpec.zones().forEach(zone -> {
        if (zone.environment() != Environment.prod) return; // only prod zones are checked
        ZoneId zoneId = ZoneId.from(zone.environment(), zone.region().orElse(null));
        if ( ! controller.zoneRegistry().hasZone(zoneId))
            throw new IllegalArgumentException("Zone " + zone + " in deployment spec was not found in this system!");
    });
}
/**
 * Verifies that we don't downgrade an existing production deployment: neither the platform
 * nor the application version of a pipeline deployment may be older than what is already running.
 *
 * @throws IllegalArgumentException if either requested version is older than the deployed one
 */
private void validateRun(Application application, ZoneId zone, Version platformVersion, ApplicationVersion applicationVersion) {
Deployment deployment = application.deployments().get(zone);
// Only production zones with an existing deployment are subject to the check.
if ( zone.environment().isProduction() && deployment != null
&& ( platformVersion.compareTo(deployment.version()) < 0
|| applicationVersion.compareTo(deployment.applicationVersion()) < 0))
throw new IllegalArgumentException(String.format("Rejecting deployment of %s to %s, as the requested versions (platform: %s, application: %s)" +
" are older than the currently deployed (platform: %s, application: %s).",
application, zone, platformVersion, applicationVersion, deployment.version(), deployment.applicationVersion()));
}
/** Returns the repository managing global rotation assignments. */
public RotationRepository rotationRepository() {
return rotationRepository;
}
/** Returns a new list containing the given applications sorted by ascending application ID. */
private static List<Application> sort(List<Application> applications) {
return applications.stream().sorted(Comparator.comparing(Application::id)).collect(Collectors.toList());
}
} |
These set statements don't really belong in addClusterContent. Move up to doBuild() or create a new method. | private void addClusterContent(ContainerCluster cluster, Element spec, ConfigModelContext context) {
DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
addConfiguredComponents(cluster, spec);
addSecretStore(cluster, spec);
addHandlers(cluster, spec);
addRestApis(spec, cluster);
addServlets(spec, cluster);
addProcessing(spec, cluster);
addSearch(spec, cluster, context.getDeployState().getQueryProfiles(), context.getDeployState().getSemanticRules());
addDocproc(spec, cluster);
addDocumentApi(spec, cluster);
addDefaultHandlers(cluster);
addStatusHandlers(cluster, context);
setDefaultMetricConsumerFactory(cluster);
addHttp(spec, cluster);
addAccessLogs(cluster, spec);
addRoutingAliases(cluster, spec, context.getDeployState().zone().environment());
addNodes(cluster, spec, context);
addClientProviders(spec, cluster);
addServerProviders(spec, cluster);
addLegacyFilters(spec, cluster);
addAthensCopperArgos(cluster, context);
cluster.setRpcServerEnabled(rpcServerEnabled);
cluster.setHttpServerEnabled(httpServerEnabled);
} | cluster.setRpcServerEnabled(rpcServerEnabled); | private void addClusterContent(ContainerCluster cluster, Element spec, ConfigModelContext context) {
DocumentFactoryBuilder.buildDocumentFactories(cluster, spec);
addConfiguredComponents(cluster, spec);
addSecretStore(cluster, spec);
addHandlers(cluster, spec);
addRestApis(spec, cluster);
addServlets(spec, cluster);
addProcessing(spec, cluster);
addSearch(spec, cluster, context.getDeployState().getQueryProfiles(), context.getDeployState().getSemanticRules());
addDocproc(spec, cluster);
addDocumentApi(spec, cluster);
addDefaultHandlers(cluster);
addStatusHandlers(cluster, context);
setDefaultMetricConsumerFactory(cluster);
addHttp(spec, cluster);
addAccessLogs(cluster, spec);
addRoutingAliases(cluster, spec, context.getDeployState().zone().environment());
addNodes(cluster, spec, context);
addClientProviders(spec, cluster);
addServerProviders(spec, cluster);
addLegacyFilters(spec, cluster);
addAthensCopperArgos(cluster, context);
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
/**
 * Default path to vip status file for container in Hosted Vespa.
 */
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/mediasearch/oor/status.html");
/**
 * Path to vip status file for container in Hosted Vespa. Only used if set, else use HOSTED_VESPA_STATUS_FILE
 */
private static final String HOSTED_VESPA_STATUS_FILE_INSTALL_SETTING = "cloudconfig_server__tenant_vip_status_file";
// Controls whether built containers get HTTP server wiring (see httpServerEnabled).
public enum Networking { disable, enable }
// Application package being built; assigned in doBuild before any add* method runs.
private ApplicationPackage app;
// True when building for standalone (non-hosted) container; also disables the RPC server.
private final boolean standaloneBuilder;
private final Networking networking;
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
// Deploy logger for the current build; assigned in doBuild.
protected DeployLogger log;
// This builder handles both the 'container' and legacy 'jdisc' config model ids.
public static final List<ConfigModelId> configModelIds =
ImmutableList.of(ConfigModelId.fromName("container"), ConfigModelId.fromName("jdisc"));
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
/**
 * Creates a builder for container models.
 *
 * @param standaloneBuilder whether this builds a standalone container; disables the RPC server
 * @param networking whether HTTP networking is enabled for the built containers
 */
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
super(ContainerModel.class);
this.standaloneBuilder = standaloneBuilder;
this.networking = networking;
this.rpcServerEnabled = !standaloneBuilder;
this.httpServerEnabled = networking == Networking.enable;
}
/** Returns the config model ids ('container' and 'jdisc') this builder handles. */
@Override
public List<ConfigModelId> handlesElements() {
return configModelIds;
}
/**
 * Builds the container model: validates the spec version, creates the cluster,
 * populates it from the services XML, and attaches platform bundles for its components.
 */
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
app = modelContext.getApplicationPackage();
checkVersion(spec);
this.log = modelContext.getDeployLogger();
ContainerCluster cluster = createContainerCluster(spec, modelContext);
addClusterContent(cluster, spec, modelContext);
addBundlesForPlatformComponents(cluster);
model.setCluster(cluster);
}
/** Adds, for each component on the cluster, the platform bundle mapped to its class (if any). */
protected void addBundlesForPlatformComponents(ContainerCluster cluster) {
    for (Component<?, ?> component : cluster.getAllComponents()) {
        String className = component.model.bundleInstantiationSpec.getClassName();
        BundleMapper.getBundlePath(className).ifPresent(cluster::addPlatformBundle);
    }
}
/**
 * Creates the container cluster config producer for the given spec, using a one-off
 * builder so the standard Dom building machinery (id handling etc.) is applied.
 */
private ContainerCluster createContainerCluster(Element spec, final ConfigModelContext modelContext) {
return new VespaDomBuilder.DomConfigProducerBuilder<ContainerCluster>() {
@Override
protected ContainerCluster doBuild(AbstractConfigProducer ancestor, Element producerSpec) {
return new ContainerCluster(ancestor, modelContext.getProducerId(), modelContext.getProducerId());
}
}.build(modelContext.getParentProducer(), spec);
}
/** Configures the cluster's secret store from the optional <secret-store> element. */
private void addSecretStore(ContainerCluster cluster, Element spec) {
    Element secretStoreElement = XML.getChild(spec, "secret-store");
    if (secretStoreElement == null) return; // no secret store configured
    SecretStore secretStore = new SecretStore();
    for (Element group : XML.getChildren(secretStoreElement, "group"))
        secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
    cluster.setSecretStore(secretStore);
}
/**
 * When the application package declares a deployment spec, wires up the Athenz identity
 * provider and rotation properties for the cluster. No-op without a deployment spec.
 */
private void addAthensCopperArgos(ContainerCluster cluster, ConfigModelContext context) {
app.getDeployment().map(DeploymentSpec::fromXml)
.ifPresent(deploymentSpec -> {
addIdentityProvider(cluster,
context.getDeployState().getProperties().configServerSpecs(),
context.getDeployState().getProperties().loadBalancerName(),
context.getDeployState().zone(),
deploymentSpec);
addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getRotations(), deploymentSpec);
});
}
/**
 * Sets rotation-related service properties on every container in the cluster:
 * the rotation list (for the global service cluster) and whether the zone's rotation is active.
 */
private void addRotationProperties(ContainerCluster cluster, Zone zone, Set<Rotation> rotations, DeploymentSpec spec) {
cluster.getContainers().forEach(container -> {
setRotations(container, rotations, spec.globalServiceId(), cluster.getName());
container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec)));
});
}
/** Returns whether the deployment spec declares this zone as active and deploying to it. */
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
    return spec.zones().stream()
               .anyMatch(declared -> declared.active() &&
                                     declared.deploysTo(zone.environment(), Optional.of(zone.region())));
}
/**
 * Sets the 'rotations' property on the container, but only when rotations exist and
 * this cluster is the one named as the global service id.
 */
private void setRotations(Container container, Set<Rotation> rotations, Optional<String> globalServiceId, String containerClusterName) {
    boolean isGlobalServiceCluster = globalServiceId.isPresent() && containerClusterName.equals(globalServiceId.get());
    if (rotations.isEmpty() || ! isGlobalServiceCluster) return;
    container.setProp("rotations", rotations.stream().map(Rotation::getId).collect(Collectors.joining(",")));
}
/**
 * Collects service and endpoint aliases from the optional <aliases> element.
 * Aliases are only honored in production.
 */
private void addRoutingAliases(ContainerCluster cluster, Element spec, Environment environment) {
if (environment != Environment.prod) return;
// NOTE(review): aliases may be null when the element is absent; assumes XML.getChildren
// tolerates a null parent and returns an empty list — confirm.
Element aliases = XML.getChild(spec, "aliases");
for (Element alias : XML.getChildren(aliases, "service-alias")) {
cluster.serviceAliases().add(XML.getValue(alias));
}
for (Element alias : XML.getChildren(aliases, "endpoint-alias")) {
cluster.endpointAliases().add(XML.getValue(alias));
}
}
/**
 * Adds user-configured components: <component> elements grouped under <components>
 * (with includes processed) as well as those directly under the cluster root.
 */
private void addConfiguredComponents(ContainerCluster cluster, Element spec) {
    for (Element componentsElement : XML.getChildren(spec, "components")) {
        addIncludes(componentsElement);
        addConfiguredComponents(cluster, componentsElement, "component");
    }
    addConfiguredComponents(cluster, spec, "component");
}
/** Makes the state monitor the default metric consumer factory for this cluster. */
protected void setDefaultMetricConsumerFactory(ContainerCluster cluster) {
cluster.setDefaultMetricConsumerFactory(MetricDefaultsConfig.Factory.Enum.STATE_MONITOR);
}
/** Adds the default handlers; the vip/status handler is added separately by addStatusHandlers. */
protected void addDefaultHandlers(ContainerCluster cluster) {
addDefaultHandlersExceptStatus(cluster);
}
/**
 * Adds the status handler: in hosted Vespa a file-backed status.html handler (path overridable
 * via the install setting env variable), otherwise the plain vip handler.
 */
protected void addStatusHandlers(ContainerCluster cluster, ConfigModelContext configModelContext) {
if (configModelContext.getDeployState().isHosted()) {
String name = "status.html";
Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_INSTALL_SETTING));
cluster.addComponent(
new FileStatusHandlerComponent(name + "-status-handler", statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
// NOTE(review): the binding argument below appears truncated in this copy of the file
// ("http: ...) — confirm against the original source.
"http:
} else {
cluster.addVipHandler();
}
}
/**
 * Intended for use by legacy builders only.
 * Will be called during building when using ContainerModelBuilder.
 * Adds both the default handlers and the vip handler in one call.
 */
public static void addDefaultHandler_legacyBuilder(ContainerCluster cluster) {
addDefaultHandlersExceptStatus(cluster);
cluster.addVipHandler();
}
/** Adds the standard built-in handlers (root, metrics state, application status, statistics). */
protected static void addDefaultHandlersExceptStatus(ContainerCluster cluster) {
cluster.addDefaultRootHandler();
cluster.addMetricStateHandler();
cluster.addApplicationStatusHandler();
cluster.addStatisticsHandler();
}
/** Registers each <client> element as a client provider component on the cluster. */
private void addClientProviders(Element spec, ContainerCluster cluster) {
    for (Element clientElement : XML.getChildren(spec, "client"))
        cluster.addComponent(new DomClientProviderBuilder().build(cluster, clientElement));
}
/** Registers each <server> element as a server provider component on the cluster. */
private void addServerProviders(Element spec, ContainerCluster cluster) {
addConfiguredComponents(cluster, spec, "server");
}
/** Registers each legacy top-level <filter> element as a component on the cluster. */
private void addLegacyFilters(Element spec, ContainerCluster cluster) {
    buildLegacyFilters(cluster, spec).forEach(cluster::addComponent);
}
/** Builds a component for each legacy <filter> child of the given element. */
private List<Component> buildLegacyFilters(AbstractConfigProducer ancestor, Element spec) {
    List<Component> filters = new ArrayList<>();
    for (Element filterElement : XML.getChildren(spec, "filter"))
        filters.add(new DomFilterBuilder().build(ancestor, filterElement));
    return filters;
}
/**
 * Adds the configured access logs (unless disabled); when none are configured and the
 * cluster has search, a default search access log is added instead.
 */
protected void addAccessLogs(ContainerCluster cluster, Element spec) {
List<Element> accessLogElements = getAccessLogElements(spec);
for (Element accessLog : accessLogElements) {
AccessLogBuilder.buildIfNotDisabled(cluster, accessLog).ifPresent(cluster::addComponent);
}
if (accessLogElements.isEmpty() && cluster.getSearch() != null)
cluster.addDefaultSearchAccessLog();
}
/** Returns all <accesslog> children of the given element. */
protected final List<Element> getAccessLogElements(Element spec) {
return XML.getChildren(spec, "accesslog");
}
/** Configures the cluster's http setup from the optional <http> element. */
protected void addHttp(Element spec, ContainerCluster cluster) {
    Element httpElement = XML.getChild(spec, "http");
    if (httpElement == null) return; // no explicit http configuration
    cluster.setHttp(buildHttp(cluster, httpElement));
}
/**
 * Builds the http configuration; when networking is disabled all configured
 * servers are stripped from it.
 */
private Http buildHttp(ContainerCluster cluster, Element httpElement) {
Http http = new HttpBuilder().build(cluster, httpElement);
if (networking == Networking.disable)
http.removeAllServers();
return http;
}
/** Registers each <rest-api> element on the cluster. */
protected void addRestApis(Element spec, ContainerCluster cluster) {
    for (Element restApiElement : XML.getChildren(spec, "rest-api"))
        cluster.addRestApi(new RestApiBuilder().build(cluster, restApiElement));
}
/** Registers each <servlet> element on the cluster. */
private void addServlets(Element spec, ContainerCluster cluster) {
    for (Element servletElement : XML.getChildren(spec, "servlet"))
        cluster.addServlet(new ServletBuilder().build(cluster, servletElement));
}
/** Enables the document api on the cluster when the spec configures it. */
private void addDocumentApi(Element spec, ContainerCluster cluster) {
    ContainerDocumentApi documentApi = buildDocumentApi(cluster, spec);
    if (documentApi == null) return; // document api not configured
    cluster.setDocumentApi(documentApi);
}
/**
 * Enables docproc on the cluster when configured, and derives the message bus
 * parameters from the docproc options.
 */
private void addDocproc(Element spec, ContainerCluster cluster) {
ContainerDocproc containerDocproc = buildDocproc(cluster, spec);
if (containerDocproc != null) {
cluster.setDocproc(containerDocproc);
ContainerDocproc.Options docprocOptions = containerDocproc.options;
cluster.setMbusParams(new ContainerCluster.MbusParams(
docprocOptions.maxConcurrentFactor, docprocOptions.documentExpansionFactor, docprocOptions.containerCoreMemory));
}
}
/**
 * Enables search on the cluster when a <search> element is present: builds the search
 * chains, the search handler, and any configured renderers (after validation).
 */
private void addSearch(Element spec, ContainerCluster cluster, QueryProfiles queryProfiles, SemanticRules semanticRules) {
Element searchElement = XML.getChild(spec, "search");
if (searchElement != null) {
addIncludes(searchElement);
cluster.setSearch(buildSearch(cluster, searchElement, queryProfiles, semanticRules));
addSearchHandler(cluster, searchElement);
validateAndAddConfiguredComponents(cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
}
/**
 * Enables processing on the cluster when a <processing> element is present: builds the
 * processing chains with their server bindings and any configured renderers (after validation).
 */
private void addProcessing(Element spec, ContainerCluster cluster) {
Element processingElement = XML.getChild(spec, "processing");
if (processingElement != null) {
addIncludes(processingElement);
cluster.setProcessingChains(new DomProcessingBuilder(null).build(cluster, processingElement),
serverBindings(processingElement, ProcessingChains.defaultBindings));
validateAndAddConfiguredComponents(cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
}
/**
 * Builds the container search setup: search chains from the spec, page templates from the
 * application package, and the given query profiles and semantic rules.
 */
private ContainerSearch buildSearch(ContainerCluster containerCluster, Element producerSpec,
QueryProfiles queryProfiles, SemanticRules semanticRules) {
SearchChains searchChains = new DomSearchChainsBuilder(null, false).build(containerCluster, producerSpec);
ContainerSearch containerSearch = new ContainerSearch(containerCluster, searchChains, new ContainerSearch.Options());
applyApplicationPackageDirectoryConfigs(containerCluster.getRoot().getDeployState().getApplicationPackage(), containerSearch);
containerSearch.setQueryProfiles(queryProfiles);
containerSearch.setSemanticRules(semanticRules);
return containerSearch;
}
/** Validates and applies page templates from the application package to the search setup. */
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) {
PageTemplates.validate(applicationPackage);
containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
/** Registers each <handler> element as a handler component on the cluster. */
private void addHandlers(ContainerCluster cluster, Element spec) {
    for (Element handlerElement : XML.getChildren(spec, "handler"))
        cluster.addComponent(new DomHandlerBuilder().build(cluster, handlerElement));
}
/**
 * Verifies the 'version' attribute of the services spec is 1.0.
 *
 * @throws IllegalArgumentException if the version is anything else — an invalid services.xml
 *         is a user error, not an internal one, so a raw RuntimeException is inappropriate
 *         (IllegalArgumentException is still caught by existing RuntimeException handlers)
 */
private void checkVersion(Element spec) {
    String version = spec.getAttribute("version");
    if ( ! Version.fromString(version).equals(new Version(1))) {
        throw new IllegalArgumentException("Expected container version to be 1.0, but got " + version);
    }
}
/**
 * Adds the container nodes: a single synthetic node when building standalone,
 * otherwise the nodes described (explicitly or implicitly) by the services XML.
 */
private void addNodes(ContainerCluster cluster, Element spec, ConfigModelContext context) {
if (standaloneBuilder)
addStandaloneNode(cluster);
else
addNodesFromXml(cluster, spec, context);
}
/** Adds the single container used when building a standalone container. */
private void addStandaloneNode(ContainerCluster cluster) {
Container container = new Container(cluster, "standalone", cluster.getContainers().size());
cluster.addContainers(Collections.singleton(container));
}
/**
 * Adds containers from the <nodes> element. Without a <nodes> element a single implicit
 * node is allocated; otherwise nodes are created per the element's attributes, and
 * node-level settings (jvm args, aliases, preload, memory, cpu affinity) are applied.
 */
private void addNodesFromXml(ContainerCluster cluster, Element containerElement, ConfigModelContext context) {
Element nodesElement = XML.getChild(containerElement, "nodes");
if (nodesElement == null) {
// Implicit single node.
Container node = new Container(cluster, "container.0", 0);
HostResource host = allocateSingleNodeHost(cluster, log, containerElement, context);
node.setHostResource(host);
node.initService();
cluster.addContainers(Collections.singleton(node));
}
else {
List<Container> nodes = createNodes(cluster, nodesElement, context);
applyNodesTagJvmArgs(nodes, nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME));
applyRoutingAliasProperties(nodes, cluster);
applyDefaultPreload(nodes, nodesElement);
applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
if (useCpuSocketAffinity(nodesElement))
AbstractService.distributeCpuSocketAffinity(nodes);
cluster.addContainers(nodes);
}
}
/**
 * Creates the cluster's containers according to which attribute the <nodes> element carries:
 * 'count' (provisioned by count), 'type' (dedicated node type), 'of' (hosted on a content
 * cluster's nodes), or an explicit node list otherwise.
 */
private List<Container> createNodes(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
if (nodesElement.hasAttribute("count"))
return createNodesFromNodeCount(cluster, nodesElement, context);
else if (nodesElement.hasAttribute("type"))
return createNodesFromNodeType(cluster, nodesElement, context);
else if (nodesElement.hasAttribute("of"))
return createNodesFromContentServiceReference(cluster, nodesElement, context);
else
return createNodesFromNodeList(cluster, nodesElement);
}
/**
 * Propagates the cluster's service and endpoint aliases to every container as
 * comma-separated service properties.
 */
private void applyRoutingAliasProperties(List<Container> result, ContainerCluster cluster) {
    // The joined strings are identical for every container, so build them once.
    if ( ! cluster.serviceAliases().isEmpty()) {
        String serviceAliases = String.join(",", cluster.serviceAliases());
        result.forEach(container -> container.setProp("servicealiases", serviceAliases));
    }
    if ( ! cluster.endpointAliases().isEmpty()) {
        String endpointAliases = String.join(",", cluster.endpointAliases());
        result.forEach(container -> container.setProp("endpointaliases", endpointAliases));
    }
}
/**
 * Applies the optional memory percentage attribute (e.g. "60%") from the <nodes> element
 * to the cluster. Blank/absent values are ignored.
 *
 * @throws IllegalArgumentException if the value is not an integer percentage ending in '%'
 */
private void applyMemoryPercentage(ContainerCluster cluster, String memoryPercentage) {
    if (memoryPercentage == null || memoryPercentage.isEmpty()) return;
    memoryPercentage = memoryPercentage.trim();
    if ( ! memoryPercentage.endsWith("%"))
        throw new IllegalArgumentException(invalidMemoryPercentageMessage(cluster));
    memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length() - 1).trim();
    try {
        cluster.setMemoryPercentage(Optional.of(Integer.parseInt(memoryPercentage)));
    }
    catch (NumberFormatException e) {
        // Keep the parse failure as cause instead of discarding it.
        throw new IllegalArgumentException(invalidMemoryPercentageMessage(cluster), e);
    }
}

/** Error text shared by both validation failures in applyMemoryPercentage. */
private static String invalidMemoryPercentageMessage(ContainerCluster cluster) {
    return "The memory percentage given for nodes in " + cluster +
           " must be an integer percentage ending by the '%' sign";
}
/**
 * Creates a single host when there is no nodes tag. In hosted Vespa the node is either
 * co-located on the first content cluster's host, or allocated as a one-node container
 * cluster; outside hosted Vespa the standalone single-node host spec is used.
 */
private HostResource allocateSingleNodeHost(ContainerCluster cluster, DeployLogger logger, Element containerElement, ConfigModelContext context) {
if (cluster.getRoot().getDeployState().isHosted()) {
Optional<HostResource> singleContentHost = getHostResourceFromContentClusters(cluster, containerElement, context);
if (singleContentHost.isPresent()) {
return singleContentHost.get();
}
else {
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
context.getDeployState().getWantedNodeVespaVersion(),
false);
// Allocate exactly one node and use its host.
return cluster.getHostSystem().allocateHosts(clusterSpec, Capacity.fromNodeCount(1), 1, logger).keySet().iterator().next();
}
} else {
return cluster.getHostSystem().getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC);
}
}
/** Creates containers on hosts provisioned according to the node count/flavor in the <nodes> element. */
private List<Container> createNodesFromNodeCount(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodesSpecification nodesSpecification = NodesSpecification.from(new ModelElement(nodesElement),
context.getDeployState().getWantedNodeVespaVersion());
Map<HostResource, ClusterMembership> hosts = nodesSpecification.provision(cluster.getRoot().getHostSystem(),
ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
log);
return createNodesFromHosts(hosts, cluster);
}
/** Creates containers on all hosts of the dedicated node type named by the 'type' attribute. */
private List<Container> createNodesFromNodeType(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
NodeType type = NodeType.valueOf(nodesElement.getAttribute("type"));
ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
ClusterSpec.Id.from(cluster.getName()),
context.getDeployState().getWantedNodeVespaVersion(),
false);
Map<HostResource, ClusterMembership> hosts =
cluster.getRoot().getHostSystem().allocateHosts(clusterSpec,
Capacity.fromRequiredNodeType(type), 1, log);
return createNodesFromHosts(hosts, cluster);
}
// Builds container nodes on hosts borrowed from the content cluster referenced by <nodes of="...">.
private List<Container> createNodesFromContentServiceReference(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    String referenceId = nodesElement.getAttribute("of");
    Element servicesRoot = servicesRootOf(nodesElement).orElseThrow(() -> clusterReferenceNotFoundException(cluster, referenceId));
    Element referenced = findChildById(servicesRoot, referenceId).orElseThrow(() -> clusterReferenceNotFoundException(cluster, referenceId));
    if ( ! "content".equals(referenced.getTagName()))
        throw new IllegalArgumentException(cluster + " references service '" + referenceId + "', " +
                                           "but that is not a content service");
    Element referencedNodes = XML.getChild(referenced, "nodes");
    if (referencedNodes == null)
        throw new IllegalArgumentException(cluster + " references service '" + referenceId + "' to supply nodes, " +
                                           "but that service has no <nodes> element");
    cluster.setHostClusterId(referenceId);
    NodesSpecification nodesSpec = NodesSpecification.from(new ModelElement(referencedNodes),
                                                           context.getDeployState().getWantedNodeVespaVersion());
    Map<HostResource, ClusterMembership> hosts = StorageGroup.provisionHosts(nodesSpec,
                                                                             referenceId,
                                                                             cluster.getRoot().getHostSystem(),
                                                                             context.getDeployLogger());
    return createNodesFromHosts(hosts, cluster);
}
/**
 * This is used in case we are on hosted Vespa and no nodes tag is supplied:
 * If there are content clusters this will pick the first host in the first cluster as the container node.
 * If there are no content clusters this will return empty (such that the node can be created by the container here).
 */
private Optional<HostResource> getHostResourceFromContentClusters(ContainerCluster cluster, Element containersElement, ConfigModelContext context) {
    Optional<Element> servicesRoot = servicesRootOf(containersElement);
    if ( ! servicesRoot.isPresent()) return Optional.empty();

    List<Element> contentClusters = XML.getChildren(servicesRoot.get(), "content");
    if (contentClusters.isEmpty()) return Optional.empty();

    // Use the first content cluster's <nodes>, or a non-dedicated single node if it has none.
    Element firstContent = contentClusters.get(0);
    Element contentNodes = XML.getChild(firstContent, "nodes");
    NodesSpecification nodesSpec = (contentNodes == null)
            ? NodesSpecification.nonDedicated(1, context.getDeployState().getWantedNodeVespaVersion())
            : NodesSpecification.from(new ModelElement(contentNodes), context.getDeployState().getWantedNodeVespaVersion());
    Map<HostResource, ClusterMembership> hosts = StorageGroup.provisionHosts(nodesSpec,
                                                                             firstContent.getAttribute("id"),
                                                                             cluster.getRoot().getHostSystem(),
                                                                             context.getDeployLogger());
    return Optional.of(hosts.keySet().iterator().next());
}
/** Returns the services element above the given Element, or empty if there is no services element */
private Optional<Element> servicesRootOf(Element element) {
    // Walk up the DOM iteratively instead of recursing; stops at the first non-Element ancestor.
    for (Node parent = element.getParentNode(); parent instanceof Element; parent = parent.getParentNode()) {
        Element ancestor = (Element) parent;
        if ("services".equals(ancestor.getTagName())) return Optional.of(ancestor);
    }
    return Optional.empty();
}
// Wraps each provisioned host in a Container service indexed by its cluster membership.
private List<Container> createNodesFromHosts(Map<HostResource, ClusterMembership> hosts, ContainerCluster cluster) {
    List<Container> containers = new ArrayList<>();
    hosts.forEach((host, membership) -> {
        Container container = new Container(cluster, "container." + membership.index(),
                                            membership.retired(), membership.index());
        container.setHostResource(host);
        container.initService();
        containers.add(container);
    });
    return containers;
}
// Creates one container per explicit <node> child, numbered in document order.
private List<Container> createNodesFromNodeList(ContainerCluster cluster, Element nodesElement) {
    List<Container> containers = new ArrayList<>();
    List<Element> nodeElements = XML.getChildren(nodesElement, "node");
    for (int index = 0; index < nodeElements.size(); index++)
        containers.add(new ContainerServiceBuilder("container." + index, index).build(cluster, nodeElements.get(index)));
    return containers;
}
// Error for a <nodes of="..."> reference that resolves to no defined service.
private IllegalArgumentException clusterReferenceNotFoundException(ContainerCluster cluster, String referenceId) {
    String message = cluster + " references service '" + referenceId + "' but this service is not defined";
    return new IllegalArgumentException(message);
}
// Returns the direct child of parent whose "id" attribute equals the given id, if any.
private Optional<Element> findChildById(Element parent, String id) {
    return XML.getChildren(parent).stream()
              .filter(child -> id.equals(child.getAttribute("id")))
              .findFirst();
}
// True only when the nodes tag explicitly enables CPU socket affinity; defaults to false.
private boolean useCpuSocketAffinity(Element nodesElement) {
    return nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)
           && Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
/**
 * Prepends the nodes tag's jvmargs to containers that have no JVM args of their
 * own, so per-node settings win over the cluster-wide default.
 *
 * Fix: renamed the misspelled parameter {@code nodesTagJvnArgs} to {@code nodesTagJvmArgs}.
 *
 * @param containers      the containers to configure
 * @param nodesTagJvmArgs the jvmargs attribute value from the nodes tag
 */
private void applyNodesTagJvmArgs(List<Container> containers, String nodesTagJvmArgs) {
    for (Container container : containers) {
        if (container.getAssignedJvmArgs().isEmpty())
            container.prependJvmArgs(nodesTagJvmArgs);
    }
}
// Applies the nodes tag's preload attribute to every container, when present.
private void applyDefaultPreload(List<Container> containers, Element nodesElement) {
    if ( ! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
    String preload = nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME);
    containers.forEach(container -> container.setPreLoad(preload));
}
// Registers the standard search handler on the cluster, bound to the <binding>
// children of <search> or, absent those, to the default binding below.
private void addSearchHandler(ContainerCluster cluster, Element searchElement) {
ProcessingHandler<SearchChains> searchHandler = new ProcessingHandler<>(
cluster.getSearch().getChains(), "com.yahoo.search.handler.SearchHandler");
// NOTE(review): the binding literal below appears truncated in this copy ("http:");
// confirm the full default binding against upstream before relying on it.
String[] defaultBindings = {"http:
for (String binding: serverBindings(searchElement, defaultBindings)) {
searchHandler.addServerBindings(binding);
}
cluster.addComponent(searchHandler);
}
// Returns the <binding> children of the given element, or the defaults when none are declared.
private String[] serverBindings(Element searchElement, String... defaultBindings) {
    List<Element> declared = XML.getChildren(searchElement, "binding");
    return declared.isEmpty() ? defaultBindings : toBindingList(declared);
}
/**
 * Extracts the trimmed, non-empty text content of each binding element.
 *
 * Fix: pass a zero-length array to {@code toArray} (idiomatic and lets the JVM
 * allocate the correctly-typed array directly) instead of presizing it.
 *
 * @param bindingElements the binding elements to read
 * @return the binding strings, in document order
 */
private String[] toBindingList(List<Element> bindingElements) {
    List<String> result = new ArrayList<>();
    for (Element element : bindingElements) {
        String text = element.getTextContent().trim();
        if ( ! text.isEmpty())
            result.add(text);
    }
    return result.toArray(new String[0]);
}
// Builds the document API from <document-api>, or returns null when the element is absent.
private ContainerDocumentApi buildDocumentApi(ContainerCluster cluster, Element spec) {
    Element documentApiElement = XML.getChild(spec, "document-api");
    if (documentApiElement == null) return null;
    return new ContainerDocumentApi(cluster, DocumentApiOptionsBuilder.build(documentApiElement));
}
// Builds docproc from <document-processing>, or returns null when the element is absent.
private ContainerDocproc buildDocproc(ContainerCluster cluster, Element spec) {
    Element docprocElement = XML.getChild(spec, "document-processing");
    if (docprocElement == null) return null;

    addIncludes(docprocElement);  // expand <include> before building chains
    DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(cluster, docprocElement);
    return new ContainerDocproc(cluster, chains, DocprocOptionsBuilder.build(docprocElement), ! standaloneBuilder);
}
// Expands all <include> children of parentElement in place; requires an application package.
private void addIncludes(Element parentElement) {
    List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includes == null || includes.isEmpty()) return;
    if (app == null)
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");
    includes.forEach(include -> addInclude(parentElement, include));
}
// Imports every child element of every file under the include directory into parentElement.
private void addInclude(Element parentElement, Element include) {
    String dirName = include.getAttribute(IncludeDirs.DIR);
    app.validateIncludeDir(dirName);
    for (Element includedFile : Xml.allElemsFromPath(app, dirName)) {
        for (Element includedSubElement : XML.getChildren(includedFile)) {
            // importNode(deep=true) clones into parentElement's document before appending.
            parentElement.appendChild(parentElement.getOwnerDocument().importNode(includedSubElement, true));
        }
    }
}
// Adds one component to the cluster per child of spec with the given tag name.
public static void addConfiguredComponents(ContainerCluster cluster, Element spec, String componentName) {
    XML.getChildren(spec, componentName)
       .forEach(node -> cluster.addComponent(new DomComponentBuilder().build(cluster, node)));
}
// Like addConfiguredComponents, but runs the given validator on each element first.
public static void validateAndAddConfiguredComponents(ContainerCluster cluster, Element spec, String componentName, Consumer<Element> elementValidator) {
    for (Element node : XML.getChildren(spec, componentName)) {
        elementValidator.accept(node);  // throws on invalid elements
        cluster.addComponent(new DomComponentBuilder().build(cluster, node));
    }
}
// Registers an Athenz identity provider when the deployment spec declares a domain,
// and propagates the domain/service identity to every container.
private void addIdentityProvider(ContainerCluster cluster, List<ConfigServerSpec> configServerSpecs, HostName loadBalancerName, Zone zone, DeploymentSpec spec) {
    spec.athenzDomain().ifPresent(domain -> {
        AthenzService service = spec.athenzService(zone.environment(), zone.region())
                .orElseThrow(() -> new RuntimeException("Missing Athenz service configuration"));
        HostName endpoint = getLoadBalancerName(loadBalancerName, configServerSpecs);
        cluster.addComponent(new IdentityProvider(domain, service, endpoint));
        for (Container container : cluster.getContainers()) {
            container.setProp("identity.domain", domain.value());
            container.setProp("identity.service", service.value());
        }
    });
}
/**
 * Returns the given load balancer name, or falls back to the first config server's
 * hostname ("unknown" when there are no config servers).
 *
 * Fix: renamed the miscased parameter {@code loadbalancerName} to {@code loadBalancerName}
 * and replaced the Optional wrapper around a plain null check.
 */
private HostName getLoadBalancerName(HostName loadBalancerName, List<ConfigServerSpec> configServerSpecs) {
    if (loadBalancerName != null) return loadBalancerName;
    return HostName.from(configServerSpecs.stream()
                                          .findFirst()
                                          .map(ConfigServerSpec::getHostName)
                                          .orElse("unknown"));
}
/**
 * Disallow renderers named "DefaultRenderer" or "JsonRenderer"
 */
private static void validateRendererElement(Element element) {
    String idAttr = element.getAttribute("id");
    // These ids are taken by the built-in XML/JSON renderers.
    boolean reserved = idAttr.equals(xmlRendererId) || idAttr.equals(jsonRendererId);
    if (reserved)
        throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", idAttr));
}
} | class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
/**
 * Default path to vip status file for container in Hosted Vespa.
 */
static final String HOSTED_VESPA_STATUS_FILE = Defaults.getDefaults().underVespaHome("var/mediasearch/oor/status.html");
/**
 * Path to vip status file for container in Hosted Vespa. Only used if set, else use HOSTED_VESPA_STATUS_FILE
 */
private static final String HOSTED_VESPA_STATUS_FILE_INSTALL_SETTING = "cloudconfig_server__tenant_vip_status_file";
// Controls whether HTTP servers are enabled; see the constructor.
public enum Networking { disable, enable }
// Application package of the current build; assigned in doBuild.
private ApplicationPackage app;
// Set in the constructor; a standalone build disables the RPC server.
private final boolean standaloneBuilder;
private final Networking networking;
// Both derived from the constructor arguments above.
private final boolean rpcServerEnabled;
private final boolean httpServerEnabled;
// Deploy logger for the current build; assigned in doBuild.
protected DeployLogger log;
// XML tags this builder handles: <container> and <jdisc>.
public static final List<ConfigModelId> configModelIds =
ImmutableList.of(ConfigModelId.fromName("container"), ConfigModelId.fromName("jdisc"));
// Component ids reserved by the built-in renderers; see validateRendererElement.
private static final String xmlRendererId = RendererRegistry.xmlRendererId.getName();
private static final String jsonRendererId = RendererRegistry.jsonRendererId.getName();
/**
 * Creates a builder for container models.
 *
 * @param standaloneBuilder whether this builds a standalone model; when true the RPC server is disabled
 * @param networking        enables the HTTP server when set to {@code enable}
 */
public ContainerModelBuilder(boolean standaloneBuilder, Networking networking) {
super(ContainerModel.class);
this.standaloneBuilder = standaloneBuilder;
this.networking = networking;
this.rpcServerEnabled = !standaloneBuilder;
this.httpServerEnabled = networking == Networking.enable;
}
/** Returns the config model ids (XML tag names) this builder handles. */
@Override
public List<ConfigModelId> handlesElements() {
return configModelIds;
}
// Entry point: builds the container cluster from the XML spec and attaches it to the model.
// The sequence below is order-sensitive (version check first, cluster content before bundles).
@Override
public void doBuild(ContainerModel model, Element spec, ConfigModelContext modelContext) {
app = modelContext.getApplicationPackage();
checkVersion(spec);
this.log = modelContext.getDeployLogger();
ContainerCluster cluster = createContainerCluster(spec, modelContext);
// Populates the cluster from the spec (defined elsewhere in this class).
addClusterContent(cluster, spec, modelContext);
addBundlesForPlatformComponents(cluster);
cluster.setRpcServerEnabled(rpcServerEnabled);
cluster.setHttpServerEnabled(httpServerEnabled);
model.setCluster(cluster);
}
// For every component whose implementation class maps to a platform bundle, add that bundle.
protected void addBundlesForPlatformComponents(ContainerCluster cluster) {
    for (Component<?, ?> component : cluster.getAllComponents()) {
        String implementationClass = component.model.bundleInstantiationSpec.getClassName();
        BundleMapper.getBundlePath(implementationClass).ifPresent(cluster::addPlatformBundle);
    }
}
// Builds the ContainerCluster config producer for this spec; the producer id is used
// for both the cluster's subId and name.
private ContainerCluster createContainerCluster(Element spec, final ConfigModelContext modelContext) {
return new VespaDomBuilder.DomConfigProducerBuilder<ContainerCluster>() {
@Override
protected ContainerCluster doBuild(AbstractConfigProducer ancestor, Element producerSpec) {
return new ContainerCluster(ancestor, modelContext.getProducerId(), modelContext.getProducerId());
}
}.build(modelContext.getParentProducer(), spec);
}
// Configures the cluster's secret store from <secret-store>, one group per <group> child.
private void addSecretStore(ContainerCluster cluster, Element spec) {
    Element secretStoreElement = XML.getChild(spec, "secret-store");
    if (secretStoreElement == null) return;

    SecretStore secretStore = new SecretStore();
    for (Element group : XML.getChildren(secretStoreElement, "group"))
        secretStore.addGroup(group.getAttribute("name"), group.getAttribute("environment"));
    cluster.setSecretStore(secretStore);
}
// When the application has a deployment spec, wires up the Athenz identity provider
// and rotation properties for all containers.
private void addAthensCopperArgos(ContainerCluster cluster, ConfigModelContext context) {
    Optional<DeploymentSpec> deployment = app.getDeployment().map(DeploymentSpec::fromXml);
    deployment.ifPresent(deploymentSpec -> {
        addIdentityProvider(cluster,
                            context.getDeployState().getProperties().configServerSpecs(),
                            context.getDeployState().getProperties().loadBalancerName(),
                            context.getDeployState().zone(),
                            deploymentSpec);
        addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getRotations(), deploymentSpec);
    });
}
// Sets rotation-related runtime properties on every container in the cluster.
private void addRotationProperties(ContainerCluster cluster, Zone zone, Set<Rotation> rotations, DeploymentSpec spec) {
    String activeRotation = Boolean.toString(zoneHasActiveRotation(zone, spec));  // invariant, hoisted
    for (Container container : cluster.getContainers()) {
        setRotations(container, rotations, spec.globalServiceId(), cluster.getName());
        container.setProp("activeRotation", activeRotation);
    }
}
// True if the deployment spec declares this zone as deployed-to and active.
private boolean zoneHasActiveRotation(Zone zone, DeploymentSpec spec) {
    return spec.zones().stream()
               .anyMatch(declared -> declared.active() &&
                                     declared.deploysTo(zone.environment(), Optional.of(zone.region())));
}
// Sets the "rotations" property on the container when this cluster is the global
// service endpoint and rotations exist; otherwise does nothing.
private void setRotations(Container container, Set<Rotation> rotations, Optional<String> globalServiceId, String containerClusterName) {
    if (rotations.isEmpty()) return;
    if ( ! globalServiceId.isPresent()) return;
    if ( ! containerClusterName.equals(globalServiceId.get())) return;
    container.setProp("rotations", rotations.stream().map(Rotation::getId).collect(Collectors.joining(",")));
}
// Collects service/endpoint aliases from <aliases>; only applies in the prod environment.
private void addRoutingAliases(ContainerCluster cluster, Element spec, Environment environment) {
    if (environment != Environment.prod) return;

    Element aliases = XML.getChild(spec, "aliases");
    XML.getChildren(aliases, "service-alias").forEach(alias -> cluster.serviceAliases().add(XML.getValue(alias)));
    XML.getChildren(aliases, "endpoint-alias").forEach(alias -> cluster.endpointAliases().add(XML.getValue(alias)));
}
// Adds components declared under any <components> group (expanding includes first)
// and components declared directly under spec.
private void addConfiguredComponents(ContainerCluster cluster, Element spec) {
    for (Element componentsGroup : XML.getChildren(spec, "components")) {
        addIncludes(componentsGroup);
        addConfiguredComponents(cluster, componentsGroup, "component");
    }
    addConfiguredComponents(cluster, spec, "component");
}
/** Makes the state monitor the cluster's default metric consumer factory. */
protected void setDefaultMetricConsumerFactory(ContainerCluster cluster) {
cluster.setDefaultMetricConsumerFactory(MetricDefaultsConfig.Factory.Enum.STATE_MONITOR);
}
/** Adds the default handlers; status handling is added separately (see addStatusHandlers). */
protected void addDefaultHandlers(ContainerCluster cluster) {
addDefaultHandlersExceptStatus(cluster);
}
// Hosted: serve status.html from a file (path overridable via an install-time
// environment setting); non-hosted: add the VIP handler instead.
protected void addStatusHandlers(ContainerCluster cluster, ConfigModelContext configModelContext) {
if (configModelContext.getDeployState().isHosted()) {
String name = "status.html";
// Install-time override of the status file path, if set in the environment.
Optional<String> statusFile = Optional.ofNullable(System.getenv(HOSTED_VESPA_STATUS_FILE_INSTALL_SETTING));
cluster.addComponent(
new FileStatusHandlerComponent(name + "-status-handler", statusFile.orElse(HOSTED_VESPA_STATUS_FILE),
// NOTE(review): the binding literal below appears truncated in this copy; verify upstream.
"http:
} else {
cluster.addVipHandler();
}
}
/**
 * Intended for use by legacy builders only.
 * Will be called during building when using ContainerModelBuilder.
 */
public static void addDefaultHandler_legacyBuilder(ContainerCluster cluster) {
addDefaultHandlersExceptStatus(cluster);
cluster.addVipHandler();  // the legacy path also gets the VIP handler directly
}
/** Adds root, metric-state, application-status and statistics handlers (no status handler). */
protected static void addDefaultHandlersExceptStatus(ContainerCluster cluster) {
cluster.addDefaultRootHandler();
cluster.addMetricStateHandler();
cluster.addApplicationStatusHandler();
cluster.addStatisticsHandler();
}
// Adds one client provider component per <client> child of spec.
private void addClientProviders(Element spec, ContainerCluster cluster) {
    XML.getChildren(spec, "client")
       .forEach(clientSpec -> cluster.addComponent(new DomClientProviderBuilder().build(cluster, clientSpec)));
}
/** Server providers are plain configured components declared with the <server> tag. */
private void addServerProviders(Element spec, ContainerCluster cluster) {
addConfiguredComponents(cluster, spec, "server");
}
// Adds each legacy <filter> element as a component on the cluster.
private void addLegacyFilters(Element spec, ContainerCluster cluster) {
    buildLegacyFilters(cluster, spec).forEach(cluster::addComponent);
}
// Builds one component per legacy <filter> child of spec.
private List<Component> buildLegacyFilters(AbstractConfigProducer ancestor,
                                           Element spec) {
    List<Component> filters = new ArrayList<>();
    for (Element filterElement : XML.getChildren(spec, "filter"))
        filters.add(new DomFilterBuilder().build(ancestor, filterElement));
    return filters;
}
// Adds configured access logs; falls back to the default search access log when a
// search cluster exists and no <accesslog> is declared.
protected void addAccessLogs(ContainerCluster cluster, Element spec) {
    List<Element> accessLogElements = getAccessLogElements(spec);
    accessLogElements.forEach(accessLog ->
            AccessLogBuilder.buildIfNotDisabled(cluster, accessLog).ifPresent(cluster::addComponent));
    if (accessLogElements.isEmpty() && cluster.getSearch() != null)
        cluster.addDefaultSearchAccessLog();
}
/** Returns all <accesslog> children of the given spec element. */
protected final List<Element> getAccessLogElements(Element spec) {
return XML.getChildren(spec, "accesslog");
}
// Configures the cluster's http setup from an optional <http> element.
protected void addHttp(Element spec, ContainerCluster cluster) {
    Element httpElement = XML.getChild(spec, "http");
    if (httpElement == null) return;
    cluster.setHttp(buildHttp(cluster, httpElement));
}
// Builds the Http config; all servers are stripped when networking is disabled.
private Http buildHttp(ContainerCluster cluster, Element httpElement) {
    Http http = new HttpBuilder().build(cluster, httpElement);
    boolean networkingDisabled = (networking == Networking.disable);
    if (networkingDisabled)
        http.removeAllServers();
    return http;
}
// Adds one REST API per <rest-api> child of spec.
protected void addRestApis(Element spec, ContainerCluster cluster) {
    XML.getChildren(spec, "rest-api")
       .forEach(restApiElem -> cluster.addRestApi(new RestApiBuilder().build(cluster, restApiElem)));
}
// Adds one servlet per <servlet> child of spec.
private void addServlets(Element spec, ContainerCluster cluster) {
    XML.getChildren(spec, "servlet")
       .forEach(servletElem -> cluster.addServlet(new ServletBuilder().build(cluster, servletElem)));
}
// Attaches the document API to the cluster when <document-api> is present.
private void addDocumentApi(Element spec, ContainerCluster cluster) {
    ContainerDocumentApi documentApi = buildDocumentApi(cluster, spec);
    if (documentApi == null) return;
    cluster.setDocumentApi(documentApi);
}
// Attaches docproc to the cluster when <document-processing> is present, and
// derives message-bus parameters from the docproc options.
private void addDocproc(Element spec, ContainerCluster cluster) {
    ContainerDocproc docproc = buildDocproc(cluster, spec);
    if (docproc == null) return;

    cluster.setDocproc(docproc);
    ContainerDocproc.Options options = docproc.options;
    cluster.setMbusParams(new ContainerCluster.MbusParams(
            options.maxConcurrentFactor, options.documentExpansionFactor, options.containerCoreMemory));
}
// Configures search from an optional <search> element: chains, the search handler,
// and any custom renderers (validated against reserved ids).
private void addSearch(Element spec, ContainerCluster cluster, QueryProfiles queryProfiles, SemanticRules semanticRules) {
    Element searchElement = XML.getChild(spec, "search");
    if (searchElement == null) return;

    addIncludes(searchElement);
    cluster.setSearch(buildSearch(cluster, searchElement, queryProfiles, semanticRules));
    addSearchHandler(cluster, searchElement);
    validateAndAddConfiguredComponents(cluster, searchElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
// Configures processing chains from an optional <processing> element.
private void addProcessing(Element spec, ContainerCluster cluster) {
    Element processingElement = XML.getChild(spec, "processing");
    if (processingElement == null) return;

    addIncludes(processingElement);
    cluster.setProcessingChains(new DomProcessingBuilder(null).build(cluster, processingElement),
                                serverBindings(processingElement, ProcessingChains.defaultBindings));
    validateAndAddConfiguredComponents(cluster, processingElement, "renderer", ContainerModelBuilder::validateRendererElement);
}
// Builds the search subsystem: chains plus query profiles, semantic rules and page templates.
private ContainerSearch buildSearch(ContainerCluster containerCluster, Element producerSpec,
                                    QueryProfiles queryProfiles, SemanticRules semanticRules) {
    SearchChains chains = new DomSearchChainsBuilder(null, false).build(containerCluster, producerSpec);
    ContainerSearch search = new ContainerSearch(containerCluster, chains, new ContainerSearch.Options());
    applyApplicationPackageDirectoryConfigs(containerCluster.getRoot().getDeployState().getApplicationPackage(), search);
    search.setQueryProfiles(queryProfiles);
    search.setSemanticRules(semanticRules);
    return search;
}
// Validates page templates in the application package before creating and installing them.
private void applyApplicationPackageDirectoryConfigs(ApplicationPackage applicationPackage,ContainerSearch containerSearch) {
PageTemplates.validate(applicationPackage);
containerSearch.setPageTemplates(PageTemplates.create(applicationPackage));
}
// Adds one request handler component per <handler> child of spec.
private void addHandlers(ContainerCluster cluster, Element spec) {
    XML.getChildren(spec, "handler")
       .forEach(handlerElement -> cluster.addComponent(new DomHandlerBuilder().build(cluster, handlerElement)));
}
// Rejects any container spec version other than 1.0.
private void checkVersion(Element spec) {
    String version = spec.getAttribute("version");
    boolean supported = Version.fromString(version).equals(new Version(1));
    if ( ! supported)
        throw new RuntimeException("Expected container version to be 1.0, but got " + version);
}
// Standalone builds get a fixed single node; otherwise nodes come from the XML spec.
private void addNodes(ContainerCluster cluster, Element spec, ConfigModelContext context) {
    if ( ! standaloneBuilder) {
        addNodesFromXml(cluster, spec, context);
        return;
    }
    addStandaloneNode(cluster);
}
// Adds the single implicit container used by standalone builds.
private void addStandaloneNode(ContainerCluster cluster) {
    int index = cluster.getContainers().size();
    cluster.addContainers(Collections.singleton(new Container(cluster, "standalone", index)));
}
// Creates cluster nodes from <nodes>, or a single implicit node when the tag is absent.
private void addNodesFromXml(ContainerCluster cluster, Element containerElement, ConfigModelContext context) {
    Element nodesElement = XML.getChild(containerElement, "nodes");
    if (nodesElement == null) {
        // No <nodes>: one implicit container on a host chosen by allocateSingleNodeHost.
        Container node = new Container(cluster, "container.0", 0);
        node.setHostResource(allocateSingleNodeHost(cluster, log, containerElement, context));
        node.initService();
        cluster.addContainers(Collections.singleton(node));
        return;
    }

    List<Container> nodes = createNodes(cluster, nodesElement, context);
    applyNodesTagJvmArgs(nodes, nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME));
    applyRoutingAliasProperties(nodes, cluster);
    applyDefaultPreload(nodes, nodesElement);
    applyMemoryPercentage(cluster, nodesElement.getAttribute(VespaDomBuilder.Allocated_MEMORY_ATTRIB_NAME));
    if (useCpuSocketAffinity(nodesElement))
        AbstractService.distributeCpuSocketAffinity(nodes);
    cluster.addContainers(nodes);
}
// Dispatches on the nodes tag's attributes; checked in order: count, type, of, explicit node list.
private List<Container> createNodes(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    if (nodesElement.hasAttribute("count"))
        return createNodesFromNodeCount(cluster, nodesElement, context);
    if (nodesElement.hasAttribute("type"))
        return createNodesFromNodeType(cluster, nodesElement, context);
    if (nodesElement.hasAttribute("of"))
        return createNodesFromContentServiceReference(cluster, nodesElement, context);
    return createNodesFromNodeList(cluster, nodesElement);
}
/**
 * Propagates the cluster's service and endpoint aliases to each container as
 * comma-separated properties (only when aliases exist).
 *
 * Fix: join the aliases once with {@link String#join} instead of re-collecting
 * a stream for every container.
 */
private void applyRoutingAliasProperties(List<Container> result, ContainerCluster cluster) {
    if ( ! cluster.serviceAliases().isEmpty()) {
        String serviceAliases = String.join(",", cluster.serviceAliases());
        result.forEach(container -> container.setProp("servicealiases", serviceAliases));
    }
    if ( ! cluster.endpointAliases().isEmpty()) {
        String endpointAliases = String.join(",", cluster.endpointAliases());
        result.forEach(container -> container.setProp("endpointaliases", endpointAliases));
    }
}
/**
 * Parses the allocated-memory attribute ("NN%") and sets it on the cluster.
 * No-op when the attribute is absent or empty.
 *
 * Fixes: the error message was duplicated in two branches (now a single local),
 * and the NumberFormatException is preserved as the cause of the rethrown exception.
 *
 * @throws IllegalArgumentException if the value is not an integer percentage ending in '%'
 */
private void applyMemoryPercentage(ContainerCluster cluster, String memoryPercentage) {
    if (memoryPercentage == null || memoryPercentage.isEmpty()) return;

    String errorMessage = "The memory percentage given for nodes in " + cluster +
                          " must be an integer percentage ending by the '%' sign";
    memoryPercentage = memoryPercentage.trim();
    if ( ! memoryPercentage.endsWith("%"))
        throw new IllegalArgumentException(errorMessage);
    memoryPercentage = memoryPercentage.substring(0, memoryPercentage.length() - 1).trim();
    try {
        cluster.setMemoryPercentage(Optional.of(Integer.parseInt(memoryPercentage)));
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException(errorMessage, e);
    }
}
/** Creates a single host when there is no nodes tag */
private HostResource allocateSingleNodeHost(ContainerCluster cluster, DeployLogger logger, Element containerElement, ConfigModelContext context) {
    if ( ! cluster.getRoot().getDeployState().isHosted())
        return cluster.getHostSystem().getHost(Container.SINGLENODE_CONTAINER_SERVICESPEC);

    // Hosted: co-locate on a content cluster host when possible; otherwise allocate
    // a dedicated single-node cluster (lazily, only when no content host exists).
    return getHostResourceFromContentClusters(cluster, containerElement, context)
            .orElseGet(() -> {
                ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container,
                                                              ClusterSpec.Id.from(cluster.getName()),
                                                              context.getDeployState().getWantedNodeVespaVersion(),
                                                              false);
                return cluster.getHostSystem()
                              .allocateHosts(clusterSpec, Capacity.fromNodeCount(1), 1, logger)
                              .keySet().iterator().next();
            });
}
// Provisions hosts per an explicit <nodes count="..."> specification.
private List<Container> createNodesFromNodeCount(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    Map<HostResource, ClusterMembership> hosts =
            NodesSpecification.from(new ModelElement(nodesElement),
                                    context.getDeployState().getWantedNodeVespaVersion())
                              .provision(cluster.getRoot().getHostSystem(),
                                         ClusterSpec.Type.container,
                                         ClusterSpec.Id.from(cluster.getName()),
                                         log);
    return createNodesFromHosts(hosts, cluster);
}
// Allocates hosts of the required node type for <nodes type="...">.
private List<Container> createNodesFromNodeType(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    NodeType nodeType = NodeType.valueOf(nodesElement.getAttribute("type"));
    ClusterSpec spec = ClusterSpec.request(ClusterSpec.Type.container,
                                           ClusterSpec.Id.from(cluster.getName()),
                                           context.getDeployState().getWantedNodeVespaVersion(),
                                           false);
    Map<HostResource, ClusterMembership> hosts =
            cluster.getRoot().getHostSystem().allocateHosts(spec, Capacity.fromRequiredNodeType(nodeType), 1, log);
    return createNodesFromHosts(hosts, cluster);
}
// Builds container nodes on hosts borrowed from the content cluster named by <nodes of="...">.
private List<Container> createNodesFromContentServiceReference(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) {
    String referenceId = nodesElement.getAttribute("of");
    Element servicesRoot = servicesRootOf(nodesElement).orElseThrow(() -> clusterReferenceNotFoundException(cluster, referenceId));
    Element referencedService = findChildById(servicesRoot, referenceId).orElseThrow(() -> clusterReferenceNotFoundException(cluster, referenceId));
    if ( ! "content".equals(referencedService.getTagName()))
        throw new IllegalArgumentException(cluster + " references service '" + referenceId + "', " +
                                           "but that is not a content service");
    Element referencedNodesElement = XML.getChild(referencedService, "nodes");
    if (referencedNodesElement == null)
        throw new IllegalArgumentException(cluster + " references service '" + referenceId + "' to supply nodes, " +
                                           "but that service has no <nodes> element");
    cluster.setHostClusterId(referenceId);
    Map<HostResource, ClusterMembership> hosts =
            StorageGroup.provisionHosts(NodesSpecification.from(new ModelElement(referencedNodesElement),
                                                                context.getDeployState().getWantedNodeVespaVersion()),
                                        referenceId,
                                        cluster.getRoot().getHostSystem(),
                                        context.getDeployLogger());
    return createNodesFromHosts(hosts, cluster);
}
/**
 * This is used in case we are on hosted Vespa and no nodes tag is supplied:
 * If there are content clusters this will pick the first host in the first cluster as the container node.
 * If there are no content clusters this will return empty (such that the node can be created by the container here).
 */
private Optional<HostResource> getHostResourceFromContentClusters(ContainerCluster cluster, Element containersElement, ConfigModelContext context) {
    Optional<Element> services = servicesRootOf(containersElement);
    if ( ! services.isPresent()) return Optional.empty();

    List<Element> contentServices = XML.getChildren(services.get(), "content");
    if (contentServices.isEmpty()) return Optional.empty();

    Element firstContentService = contentServices.get(0);
    Element contentNodes = XML.getChild(firstContentService, "nodes");
    // Fall back to a non-dedicated single node when the content cluster has no <nodes>.
    NodesSpecification nodesSpec = (contentNodes == null)
            ? NodesSpecification.nonDedicated(1, context.getDeployState().getWantedNodeVespaVersion())
            : NodesSpecification.from(new ModelElement(contentNodes), context.getDeployState().getWantedNodeVespaVersion());
    Map<HostResource, ClusterMembership> hosts =
            StorageGroup.provisionHosts(nodesSpec,
                                        firstContentService.getAttribute("id"),
                                        cluster.getRoot().getHostSystem(),
                                        context.getDeployLogger());
    return Optional.of(hosts.keySet().iterator().next());
}
/** Returns the services element above the given Element, or empty if there is no services element */
private Optional<Element> servicesRootOf(Element element) {
    // Iterative ancestor walk; terminates on a non-Element (or null) parent.
    Node current = element.getParentNode();
    while (current instanceof Element) {
        Element candidate = (Element) current;
        if ("services".equals(candidate.getTagName())) return Optional.of(candidate);
        current = candidate.getParentNode();
    }
    return Optional.empty();
}
// Creates one Container per provisioned host, indexed by its cluster membership.
private List<Container> createNodesFromHosts(Map<HostResource, ClusterMembership> hosts, ContainerCluster cluster) {
    List<Container> result = new ArrayList<>();
    for (Map.Entry<HostResource, ClusterMembership> entry : hosts.entrySet()) {
        ClusterMembership membership = entry.getValue();
        Container node = new Container(cluster, "container." + membership.index(),
                                       membership.retired(), membership.index());
        node.setHostResource(entry.getKey());
        node.initService();
        result.add(node);
    }
    return result;
}
// One container per explicit <node> child, numbered in document order.
private List<Container> createNodesFromNodeList(ContainerCluster cluster, Element nodesElement) {
    List<Container> result = new ArrayList<>();
    int index = 0;
    for (Element nodeElement : XML.getChildren(nodesElement, "node"))
        result.add(new ContainerServiceBuilder("container." + index, index++).build(cluster, nodeElement));
    return result;
}
// Error for a <nodes of="..."> reference that names no defined service.
private IllegalArgumentException clusterReferenceNotFoundException(ContainerCluster cluster, String referenceId) {
    return new IllegalArgumentException(
            cluster + " references service '" + referenceId + "' but this service is not defined");
}
// Returns the direct child of parent with the given "id" attribute value, if any.
private Optional<Element> findChildById(Element parent, String id) {
    return XML.getChildren(parent).stream()
              .filter(candidate -> id.equals(candidate.getAttribute("id")))
              .findFirst();
}
// CPU socket affinity is off unless the nodes tag explicitly enables it.
private boolean useCpuSocketAffinity(Element nodesElement) {
    if ( ! nodesElement.hasAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME)) return false;
    return Boolean.parseBoolean(nodesElement.getAttribute(VespaDomBuilder.CPU_SOCKET_AFFINITY_ATTRIB_NAME));
}
/**
 * Prepends the nodes tag's jvmargs to containers without their own JVM args.
 *
 * Fix: renamed the misspelled parameter {@code nodesTagJvnArgs} to {@code nodesTagJvmArgs}.
 *
 * @param containers      the containers to configure
 * @param nodesTagJvmArgs the jvmargs attribute value from the nodes tag
 */
private void applyNodesTagJvmArgs(List<Container> containers, String nodesTagJvmArgs) {
    for (Container container : containers) {
        if (container.getAssignedJvmArgs().isEmpty())
            container.prependJvmArgs(nodesTagJvmArgs);
    }
}
// Applies the nodes tag's preload attribute (when present) to every container.
private void applyDefaultPreload(List<Container> containers, Element nodesElement) {
    if ( ! nodesElement.hasAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME)) return;
    String preLoad = nodesElement.getAttribute(VespaDomBuilder.PRELOAD_ATTRIB_NAME);
    for (Container container : containers)
        container.setPreLoad(preLoad);
}
// Registers the standard search handler, bound to the <binding> children of <search>
// or, absent those, to the default binding below.
private void addSearchHandler(ContainerCluster cluster, Element searchElement) {
ProcessingHandler<SearchChains> searchHandler = new ProcessingHandler<>(
cluster.getSearch().getChains(), "com.yahoo.search.handler.SearchHandler");
// NOTE(review): the binding literal below appears truncated in this copy ("http:");
// confirm the full default binding against upstream before relying on it.
String[] defaultBindings = {"http:
for (String binding: serverBindings(searchElement, defaultBindings)) {
searchHandler.addServerBindings(binding);
}
cluster.addComponent(searchHandler);
}
/** Returns the declared <binding> values of the search element, or the given defaults when none are declared. */
private String[] serverBindings(Element searchElement, String... defaultBindings) {
    List<Element> bindings = XML.getChildren(searchElement, "binding");
    return bindings.isEmpty() ? defaultBindings : toBindingList(bindings);
}
/**
 * Extracts the trimmed text content of each binding element, skipping elements whose
 * content is empty after trimming.
 *
 * @param bindingElements the binding elements to read
 * @return the non-empty binding strings, in document order
 */
private String[] toBindingList(List<Element> bindingElements) {
    // Stream + toArray(String[]::new) replaces the manual accumulate-then-copy loop.
    return bindingElements.stream()
                          .map(element -> element.getTextContent().trim())
                          .filter(text -> ! text.isEmpty())
                          .toArray(String[]::new);
}
// Builds the document API setup for the cluster from the <document-api> element.
// Returns null when that element is absent (document API not configured).
private ContainerDocumentApi buildDocumentApi(ContainerCluster cluster, Element spec) {
Element documentApiElement = XML.getChild(spec, "document-api");
if (documentApiElement == null) return null;
ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement);
return new ContainerDocumentApi(cluster, documentApiOptions);
}
/**
 * Builds the document processing setup from the <document-processing> element,
 * or returns null when that element is absent.
 */
private ContainerDocproc buildDocproc(ContainerCluster cluster, Element spec) {
    Element docprocElement = XML.getChild(spec, "document-processing");
    if (docprocElement == null) return null;

    addIncludes(docprocElement); // expand <include> directives before building chains
    DocprocChains chains = new DomDocprocChainsBuilder(null, false).build(cluster, docprocElement);
    ContainerDocproc.Options options = DocprocOptionsBuilder.build(docprocElement);
    return new ContainerDocproc(cluster, chains, options, !standaloneBuilder);
}
/** Expands all <include> directives directly under the given element; requires an application package to be set. */
private void addIncludes(Element parentElement) {
    List<Element> includes = XML.getChildren(parentElement, IncludeDirs.INCLUDE);
    if (includes == null || includes.isEmpty()) return;

    if (app == null)
        throw new IllegalArgumentException("Element <include> given in XML config, but no application package given.");

    includes.forEach(include -> addInclude(parentElement, include));
}
/** Imports the child elements of every XML file in the included directory into the parent element. */
private void addInclude(Element parentElement, Element include) {
    String dirName = include.getAttribute(IncludeDirs.DIR);
    app.validateIncludeDir(dirName);
    for (Element includedFile : Xml.allElemsFromPath(app, dirName))
        for (Element child : XML.getChildren(includedFile))
            parentElement.appendChild(parentElement.getOwnerDocument().importNode(child, true));
}
/** Adds a component to the cluster for each child of spec with the given element name. */
public static void addConfiguredComponents(ContainerCluster cluster, Element spec, String componentName) {
    XML.getChildren(spec, componentName)
       .forEach(node -> cluster.addComponent(new DomComponentBuilder().build(cluster, node)));
}
/** Like addConfiguredComponents, but runs the given validator on each element before adding its component. */
public static void validateAndAddConfiguredComponents(ContainerCluster cluster, Element spec, String componentName, Consumer<Element> elementValidator) {
    XML.getChildren(spec, componentName).forEach(node -> {
        elementValidator.accept(node); // expected to throw on invalid elements
        cluster.addComponent(new DomComponentBuilder().build(cluster, node));
    });
}
// Adds an Athenz identity provider component to the cluster when the deployment spec declares
// an Athenz domain, and stamps each container with the identity domain/service properties.
private void addIdentityProvider(ContainerCluster cluster, List<ConfigServerSpec> configServerSpecs, HostName loadBalancerName, Zone zone, DeploymentSpec spec) {
spec.athenzDomain().ifPresent(domain -> {
// A declared domain without a service for this environment/region is a configuration error.
AthenzService service = spec.athenzService(zone.environment(), zone.region())
.orElseThrow(() -> new RuntimeException("Missing Athenz service configuration"));
IdentityProvider identityProvider = new IdentityProvider(domain, service, getLoadBalancerName(loadBalancerName, configServerSpecs));
cluster.addComponent(identityProvider);
cluster.getContainers().forEach(container -> {
container.setProp("identity.domain", domain.value());
container.setProp("identity.service", service.value());
});
});
}
/** Returns the given load balancer name, falling back to the first config server's hostname, or "unknown". */
private HostName getLoadBalancerName(HostName loadbalancerName, List<ConfigServerSpec> configServerSpecs) {
    if (loadbalancerName != null) return loadbalancerName;
    String fallback = configServerSpecs.stream()
                                       .findFirst()
                                       .map(ConfigServerSpec::getHostName)
                                       .orElse("unknown");
    return HostName.from(fallback);
}
/** Rejects renderer elements using the ids reserved for the built-in renderers. */
private static void validateRendererElement(Element element) {
    String idAttr = element.getAttribute("id");
    boolean reserved = idAttr.equals(xmlRendererId) || idAttr.equals(jsonRendererId);
    if (reserved)
        throw new IllegalArgumentException(String.format("Renderer id %s is reserved for internal use", idAttr));
}
} |
Please update message. | public boolean storeLastPublishedStateBundle(ClusterStateBundle stateBundle) throws InterruptedException {
EnvelopedClusterStateBundleCodec envelopedBundleCodec = new SlimeClusterStateBundleCodec();
byte[] encodedBundle = envelopedBundleCodec.encodeWithEnvelope(stateBundle);
try{
log.log(LogLevel.DEBUG, () -> String.format("Fleetcontroller %d: Storing published state bundle %s at '%spublished_state_bundle'",
nodeIndex, stateBundle, zooKeeperRoot));
session.setData(zooKeeperRoot + "published_state_bundle", encodedBundle, -1);
} catch (InterruptedException e) {
throw (InterruptedException) new InterruptedException("Interrupted").initCause(e);
} catch (Exception e) {
maybeLogExceptionWarning(e, "Failed to store start timestamps in ZooKeeper");
return false;
}
return true;
} | maybeLogExceptionWarning(e, "Failed to store start timestamps in ZooKeeper"); | public boolean storeLastPublishedStateBundle(ClusterStateBundle stateBundle) throws InterruptedException {
EnvelopedClusterStateBundleCodec envelopedBundleCodec = new SlimeClusterStateBundleCodec();
byte[] encodedBundle = envelopedBundleCodec.encodeWithEnvelope(stateBundle);
try{
log.log(LogLevel.DEBUG, () -> String.format("Fleetcontroller %d: Storing published state bundle %s at '%spublished_state_bundle'",
nodeIndex, stateBundle, zooKeeperRoot));
session.setData(zooKeeperRoot + "published_state_bundle", encodedBundle, -1);
} catch (InterruptedException e) {
throw (InterruptedException) new InterruptedException("Interrupted").initCause(e);
} catch (Exception e) {
maybeLogExceptionWarning(e, "Failed to store last published cluster state bundle in ZooKeeper");
return false;
}
return true;
} | class ZooKeeperWatcher implements Watcher {
private Event.KeeperState state = null;
/** Returns the last observed session state, defaulting to SyncConnected before any event has been seen. */
public Event.KeeperState getState() {
    return (state == null) ? Event.KeeperState.SyncConnected : state;
}
// Watcher callback: tracks session state transitions and notifies the listener on loss of
// connection/session; node events are unexpected here and only logged.
public void process(WatchedEvent watchedEvent) {
// Ignore anything arriving after the session has already expired.
if (state != null && state.equals(Event.KeeperState.Expired)) {
log.log(LogLevel.WARNING, "Fleetcontroller " + nodeIndex + ": Got event from ZooKeeper session after it expired");
return;
}
Event.KeeperState newState = watchedEvent.getState();
// Only react to actual state transitions.
if (state == null || !state.equals(newState)) switch (newState) {
case Expired:
log.log(LogLevel.INFO, "Fleetcontroller " + nodeIndex + ": Zookeeper session expired");
sessionOpen = false;
listener.handleZooKeeperSessionDown();
break;
case Disconnected:
log.log(LogLevel.INFO, "Fleetcontroller " + nodeIndex + ": Lost connection to zookeeper server");
sessionOpen = false;
listener.handleZooKeeperSessionDown();
break;
case SyncConnected:
log.log(LogLevel.INFO, "Fleetcontroller " + nodeIndex + ": Connection to zookeeper server established. Refetching master data");
// Re-read master data after (re)connect, when a gatherer exists.
if (masterDataGatherer != null) {
masterDataGatherer.restart();
}
}
// This watcher registers no data/child watches, so node events indicate a bug somewhere.
switch (watchedEvent.getType()) {
case NodeChildrenChanged:
log.log(LogLevel.WARNING, "Fleetcontroller " + nodeIndex + ": Got unexpected ZooKeeper event NodeChildrenChanged");
break;
case NodeDataChanged:
log.log(LogLevel.WARNING, "Fleetcontroller " + nodeIndex + ": Got unexpected ZooKeeper event NodeDataChanged");
break;
case NodeCreated:
log.log(LogLevel.WARNING, "Fleetcontroller " + nodeIndex + ": Got unexpected ZooKeeper event NodeCreated");
break;
case NodeDeleted:
log.log(LogLevel.WARNING, "Fleetcontroller " + nodeIndex + ": Got unexpected ZooKeeper event NodeDeleted");
break;
case None:
if (state != null && state.equals(watchedEvent.getState())) {
log.log(LogLevel.WARNING, "Fleetcontroller " + nodeIndex + ": Got None type event that didn't even alter session state. What does that indicate?");
}
}
state = watchedEvent.getState();
}
} | class ZooKeeperWatcher implements Watcher {
private Event.KeeperState state = null;
public Event.KeeperState getState() { return (state == null ? Event.KeeperState.SyncConnected : state); }
public void process(WatchedEvent watchedEvent) {
if (state != null && state.equals(Event.KeeperState.Expired)) {
log.log(LogLevel.WARNING, "Fleetcontroller " + nodeIndex + ": Got event from ZooKeeper session after it expired");
return;
}
Event.KeeperState newState = watchedEvent.getState();
if (state == null || !state.equals(newState)) switch (newState) {
case Expired:
log.log(LogLevel.INFO, "Fleetcontroller " + nodeIndex + ": Zookeeper session expired");
sessionOpen = false;
listener.handleZooKeeperSessionDown();
break;
case Disconnected:
log.log(LogLevel.INFO, "Fleetcontroller " + nodeIndex + ": Lost connection to zookeeper server");
sessionOpen = false;
listener.handleZooKeeperSessionDown();
break;
case SyncConnected:
log.log(LogLevel.INFO, "Fleetcontroller " + nodeIndex + ": Connection to zookeeper server established. Refetching master data");
if (masterDataGatherer != null) {
masterDataGatherer.restart();
}
}
switch (watchedEvent.getType()) {
case NodeChildrenChanged:
log.log(LogLevel.WARNING, "Fleetcontroller " + nodeIndex + ": Got unexpected ZooKeeper event NodeChildrenChanged");
break;
case NodeDataChanged:
log.log(LogLevel.WARNING, "Fleetcontroller " + nodeIndex + ": Got unexpected ZooKeeper event NodeDataChanged");
break;
case NodeCreated:
log.log(LogLevel.WARNING, "Fleetcontroller " + nodeIndex + ": Got unexpected ZooKeeper event NodeCreated");
break;
case NodeDeleted:
log.log(LogLevel.WARNING, "Fleetcontroller " + nodeIndex + ": Got unexpected ZooKeeper event NodeDeleted");
break;
case None:
if (state != null && state.equals(watchedEvent.getState())) {
log.log(LogLevel.WARNING, "Fleetcontroller " + nodeIndex + ": Got None type event that didn't even alter session state. What does that indicate?");
}
}
state = watchedEvent.getState();
}
} |
Is this needed? | public void can_store_latest_cluster_state_bundle() throws Exception {
Fixture f = new Fixture();
DatabaseHandler handler = f.createHandler();
handler.doNextZooKeeperTask(f.createMockContext());
handler.saveLatestClusterStateBundle(f.createMockContext(), f.dummyBundle);
handler.doNextZooKeeperTask(f.createMockContext());
verify(f.mockDatabase).storeLastPublishedStateBundle(eq(f.dummyBundle));
} | handler.doNextZooKeeperTask(f.createMockContext()); | public void can_store_latest_cluster_state_bundle() throws Exception {
Fixture f = new Fixture();
DatabaseHandler handler = f.createHandler();
handler.doNextZooKeeperTask(f.createMockContext());
handler.saveLatestClusterStateBundle(f.createMockContext(), f.dummyBundle);
verify(f.mockDatabase).storeLastPublishedStateBundle(eq(f.dummyBundle));
} | class Fixture {
final ClusterFixture clusterFixture = ClusterFixture.forFlatCluster(10);
final FleetController mockController = mock(FleetController.class);
final Database mockDatabase = mock(Database.class);
final Timer mockTimer = mock(Timer.class);
final DatabaseFactory mockDbFactory = (params) -> mockDatabase;
final String databaseAddress = "localhost:0";
final Object monitor = new Object();
final ClusterStateBundle dummyBundle;
// Builds a fixture with a mocked database that accepts all writes, and a mocked timer at a fixed instant.
Fixture() throws Exception {
dummyBundle = ClusterStateBundleUtil.makeBundle("distributor:2 storage:2",
StateMapping.of("default", "distributor:2 storage:2 .0.s:d"),
StateMapping.of("upsidedown", "distributor:2 .0.s:d storage:2"));
when(mockDatabase.isClosed()).thenReturn(false);
when(mockDatabase.storeMasterVote(anyInt())).thenReturn(true);
when(mockDatabase.storeLastPublishedStateBundle(any())).thenReturn(true);
when(mockTimer.getCurrentTimeInMillis()).thenReturn(1000000L);
}
// Returns a context exposing the fixture's cluster and mocked fleet controller;
// listeners are null since these tests do not exercise listener callbacks.
DatabaseHandler.Context createMockContext() {
return new DatabaseHandler.Context() {
@Override
public ContentCluster getCluster() {
return clusterFixture.cluster();
}
@Override
public FleetController getFleetController() {
return mockController;
}
@Override
public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() {
return null;
}
@Override
public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() {
return null;
}
};
}
// Creates the handler under test, wired to the mock database factory (node index 0).
DatabaseHandler createHandler() throws Exception {
return new DatabaseHandler(mockDbFactory, mockTimer, databaseAddress, 0, monitor);
}
} | class Fixture {
final ClusterFixture clusterFixture = ClusterFixture.forFlatCluster(10);
final FleetController mockController = mock(FleetController.class);
final Database mockDatabase = mock(Database.class);
final Timer mockTimer = mock(Timer.class);
final DatabaseFactory mockDbFactory = (params) -> mockDatabase;
final String databaseAddress = "localhost:0";
final Object monitor = new Object();
final ClusterStateBundle dummyBundle;
Fixture() throws Exception {
dummyBundle = ClusterStateBundleUtil.makeBundle("distributor:2 storage:2",
StateMapping.of("default", "distributor:2 storage:2 .0.s:d"),
StateMapping.of("upsidedown", "distributor:2 .0.s:d storage:2"));
when(mockDatabase.isClosed()).thenReturn(false);
when(mockDatabase.storeMasterVote(anyInt())).thenReturn(true);
when(mockDatabase.storeLastPublishedStateBundle(any())).thenReturn(true);
when(mockTimer.getCurrentTimeInMillis()).thenReturn(1000000L);
}
DatabaseHandler.Context createMockContext() {
return new DatabaseHandler.Context() {
@Override
public ContentCluster getCluster() {
return clusterFixture.cluster();
}
@Override
public FleetController getFleetController() {
return mockController;
}
@Override
public NodeAddedOrRemovedListener getNodeAddedOrRemovedListener() {
return null;
}
@Override
public NodeStateOrHostInfoChangeHandler getNodeStateUpdateListener() {
return null;
}
};
}
DatabaseHandler createHandler() throws Exception {
return new DatabaseHandler(mockDbFactory, mockTimer, databaseAddress, 0, monitor);
}
} |
:bug::shower: | public void notifyOfCompletion(JobReport report) {
log.log(LogLevel.INFO, String.format("Got notified of %s for %s of %s (%d).",
report.jobError().map(JobError::toString).orElse("success"),
report.jobType(),
report.applicationId(),
report.projectId()));
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
JobRun triggering;
if (report.jobType() == component) {
ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, empty(), empty(), "Application commit", clock.instant());
if (report.success()) {
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion))
.withOutstandingChange(Change.empty());
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
}
else triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered)
.orElseThrow(() -> new IllegalStateException("Got notified about completion of " + report.jobType().jobName() + " for " +
report.applicationId() + ", but that has neither been triggered nor deployed"));
applications().store(application.withJobCompletion(report.projectId(),
report.jobType(),
triggering.completion(report.buildNumber(), clock.instant()),
report.jobError()));
});
} | .withOutstandingChange(Change.empty()); | public void notifyOfCompletion(JobReport report) {
log.log(LogLevel.INFO, String.format("Got notified of %s for %s of %s (%d).",
report.jobError().map(JobError::toString).orElse("success"),
report.jobType(),
report.applicationId(),
report.projectId()));
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
JobRun triggering;
if (report.jobType() == component) {
ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, empty(), empty(), "Application commit", clock.instant());
if (report.success()) {
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion))
.withOutstandingChange(Change.empty());
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
}
else triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered)
.orElseThrow(() -> new IllegalStateException("Got notified about completion of " + report.jobType().jobName() + " for " +
report.applicationId() + ", but that has neither been triggered nor deployed"));
applications().store(application.withJobCompletion(report.projectId(),
report.jobType(),
triggering.completion(report.buildNumber(), clock.instant()),
report.jobError()));
});
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
/**
 * Creates a deployment trigger.
 *
 * @throws NullPointerException if any argument is null
 */
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
    Objects.requireNonNull(controller, "controller cannot be null");
    Objects.requireNonNull(curator, "curator cannot be null");
    // buildService was previously stored unchecked, unlike its siblings — fail fast here too.
    Objects.requireNonNull(buildService, "buildService cannot be null");
    Objects.requireNonNull(clock, "clock cannot be null");
    this.controller = controller;
    this.clock = clock;
    this.order = new DeploymentOrder(controller::system);
    this.buildService = buildService;
}
/** Returns the deployment order helper used to map declared zones to jobs. */
public DeploymentOrder deploymentOrder() {
return order;
}
/**
 * NOTE(review): this javadoc appears orphaned — it describes the job-completion callback
 * (defined elsewhere), not the method below:
 * Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
 */
/**
 * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
 *
 * Only one job is triggered each run for test jobs, since their environments have limited capacity.
 */
public long triggerReadyJobs() {
return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
.entrySet().stream()
.flatMap(entry -> (entry.getKey()
// Test jobs: order by retry first, then application upgrades, then oldest availability; group per job type.
? entry.getValue().stream()
.sorted(comparing(Job::isRetry)
.thenComparing(Job::applicationUpgrade)
.reversed()
.thenComparing(Job::availableSince))
.collect(groupingBy(Job::jobType))
// Production jobs: grouped per application, no capacity limit.
: entry.getValue().stream()
.collect(groupingBy(Job::applicationId)))
.values().stream()
// Trigger at most one test job per type; all eligible production jobs per application.
.map(jobs -> (Supplier<Long>) jobs.stream()
.filter(job -> canTrigger(job) && trigger(job))
.limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
.parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
 * Attempts to trigger the given job for the given application and returns the outcome.
 *
 * If the build service cannot find the given job, or claims it is illegal to trigger it,
 * the project id is removed from the application owning the job, to prevent further trigger attempts.
 */
public boolean trigger(Job job) {
log.log(LogLevel.INFO, String.format("Attempting to trigger %s: %s (%s)", job, job.reason, job.target));
try {
buildService.trigger(job);
// Record the triggering on the application under lock, so status reflects the build service.
applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(
job.jobType, JobRun.triggering(job.target.targetPlatform, job.target.targetApplication,
job.target.sourcePlatform, job.target.sourceApplication,
job.reason, clock.instant()))));
return true;
}
catch (RuntimeException e) {
log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
// Unknown/illegal job: drop the project id so we stop retrying this application.
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
applications().lockOrThrow(job.applicationId(), application ->
applications().store(application.withProjectId(OptionalLong.empty())));
return false;
}
}
/**
 * Triggers a change of this application.
 *
 * @param applicationId the application to trigger
 * @throws IllegalArgumentException if this application already has an ongoing change
 */
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
// Refuse to stack changes unless the current one is failing anyway.
if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
// A new application change supersedes any queued outstanding change.
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/** Cancels the platform upgrade of the given application, retaining its application change only if requested. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
    applications().lockOrThrow(applicationId, application -> {
        Change remaining = application.change().application()
                                      .filter(ignored -> keepApplicationChange)
                                      .map(Change::of)
                                      .orElse(Change.empty());
        applications().store(application.withChange(remaining));
    });
}
/** Returns all jobs which are ready to run, grouped by their job type. */
public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
    Map<JobType, List<Job>> ready = computeReadyJobs().collect(groupingBy(Job::jobType));
    return ready;
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps. */
private Stream<Job> computeReadyJobs() {
// Only real (non-PR) applications with a project id and an ongoing change can have ready jobs.
return ApplicationList.from(applications().asList())
.notPullRequest()
.withProjectId()
.deploying()
.idList().stream()
.map(this::computeReadyJobs)
.flatMap(List::stream);
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
public boolean isRunning(Application application, JobType jobType) {
// NOTE(review): lastTriggered().get() assumes any job with a lastCompleted also has a
// lastTriggered — confirm this invariant holds for all stored job statuses.
return ! application.deploymentJobs().statusOf(jobType)
.flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
.orElse(false)
&& jobStateIsAmong(application, jobType, running, queued);
}
/** Returns whether the build service reports this application's job to be in any of the given states. */
private boolean jobStateIsAmong(Application application, JobType jobType, JobState state, JobState... states) {
    BuildJob buildJob = BuildJob.of(application.id(),
                                    application.deploymentJobs().projectId().getAsLong(),
                                    jobType.jobName());
    return EnumSet.of(state, states).contains(buildService.stateOf(buildJob));
}
// Forces a trigger of the given job, bypassing readiness computation. Returns the job types
// actually triggered: the job itself, or the test jobs that must succeed first.
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType) {
Application application = applications().require(applicationId);
// The component (build) job is triggered directly on the build service.
if (jobType == component) {
buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
return singletonList(component);
}
State target = targetFor(application, application.change(), deploymentFor(application, jobType));
String reason = ">:o:< Triggered by force! (-o-) |-o-| (=oo=)";
// Non-production jobs, or tested targets, can be triggered immediately.
if ( ! jobType.isProduction() || isTested(application, target)) {
trigger(deploymentJob(application, target, application.change(), jobType, reason, clock.instant(), Collections.emptySet()));
return singletonList(jobType);
}
// Untested production target: run the required test jobs instead.
List<Job> testJobs = testJobsFor(application, target, reason, clock.instant());
testJobs.forEach(this::trigger);
return testJobs.stream().map(Job::jobType).collect(toList());
}
/** Creates a deployment job, flagging it as a retry when the previous run failed with out of capacity. */
private Job deploymentJob(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
    boolean isRetry = application.deploymentJobs().statusOf(jobType)
                                 .flatMap(JobStatus::jobError)
                                 .filter(JobError.outOfCapacity::equals)
                                 .isPresent();
    String jobReason = isRetry ? reason + "; retrying on out of capacity" : reason;
    return new Job(application, target, change, jobType, jobReason, availableSince, concurrentlyWith, isRetry);
}
/** Returns the platform version this job should deploy: the newest of the deployed and the changing version. */
private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
    Optional<Version> newest = max(deployment.map(Deployment::version), change.platform());
    return newest.orElse(application.oldestDeployedPlatform()
                                    .orElse(controller.systemVersion()));
}
// Returns the application version this job should deploy: the newest of the deployed and changing
// versions; else the oldest deployed version; else the last successful component build.
// NOTE(review): the trailing get()s assume a component job with a success exists whenever
// nothing is deployed — confirm that invariant.
private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::applicationVersion), change.application())
.orElse(application.oldestDeployedApplication()
.orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application()));
}
/** Returns the greater of two optionals, preferring a present value over an absent one. */
private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
    if ( ! o1.isPresent()) return o2;
    if ( ! o2.isPresent()) return o1;
    return o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
}
/**
 * Finds the next step to trigger for the given application, if any, and returns these as a list.
 *
 * Walks the production steps in order, tracking when the previous step completed; jobs in the
 * first incomplete step become ready once tested, and test jobs are computed for the first
 * untested target encountered.
 */
private List<Job> computeReadyJobs(ApplicationId id) {
List<Job> jobs = new ArrayList<>();
applications().get(id).ifPresent(application -> {
// Production steps, plus pure-delay steps (which have no zones).
List<Step> productionSteps = application.deploymentSpec().steps().stream()
.filter(step -> step.deploysTo(prod) || step.zones().isEmpty())
.collect(toList());
Change change = application.changeAt(clock.instant());
@SuppressWarnings("cast")
// Start from the later of the system and staging test successes, if both exist.
Optional<Instant> completedAt = max((Optional<Instant>) application.deploymentJobs().statusOf(systemTest)
.flatMap(job -> job.lastSuccess().map(JobRun::at)),
(Optional<Instant>) application.deploymentJobs().statusOf(stagingTest)
.flatMap(job -> job.lastSuccess().map(JobRun::at)));
String reason = "New change available";
List<Job> testJobs = null;
if (change.isPresent())
for (Step step : productionSteps) {
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
// Group this step's jobs by their completion instant; empty() marks incomplete jobs.
Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(change, application, job)));
if (jobsByCompletion.containsKey(empty())) {
// Step has incomplete jobs: make each ready if tested, else compute test jobs once.
for (JobType job : jobsByCompletion.get(empty())) {
State target = targetFor(application, change, deploymentFor(application, job));
if (isTested(application, target)) {
if (completedAt.isPresent())
jobs.add(deploymentJob(application, target, change, job, reason, completedAt.get(), stepJobs));
}
else if (testJobs == null) {
if ( ! alreadyTriggered(application, target))
testJobs = testJobsFor(application, target, "Testing deployment for " + job.jobName(), completedAt.orElse(clock.instant()));
else
testJobs = emptyList();
}
}
}
else {
if (stepJobs.isEmpty()) {
// Pure delay step: shift the completion time; absent until the delay has passed.
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
// Fully completed step: the latest job completion gates the next step.
completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
}
// No production target needed testing: still test the current change outside prod.
if (testJobs == null)
testJobs = testJobsFor(application, targetFor(application, application.change(), empty()), "Testing last changes outside prod", clock.instant());
jobs.addAll(testJobs);
removeCompletedChange(application);
});
return jobs;
}
// Clears the platform and/or application part of the current change once every declared job has
// completed it; re-checks under lock that the change has not been replaced in the meantime.
private void removeCompletedChange(Application application) {
// All declared zone jobs; a spec without steps implies a single test zone.
List<JobType> jobs = (application.deploymentSpec().steps().isEmpty()
? singletonList(new DeploymentSpec.DeclaredZone(test))
: application.deploymentSpec().steps()).stream()
.flatMap(step -> step.zones().stream())
.map(order::toJob)
.collect(toList());
boolean platformComplete = application.change().platform().map(Change::of)
.map(change -> jobs.stream().allMatch(job -> completedAt(change, application, job).isPresent()))
.orElse(false);
boolean applicationComplete = application.change().application().map(Change::of)
.map(change -> jobs.stream().allMatch(job -> completedAt(change, application, job).isPresent()))
.orElse(false);
if (platformComplete || applicationComplete)
applications().lockIfPresent(application.id(), lockedApplication -> {
// Abort if another thread changed the application's change since we computed completion.
if ( ! application.change().equals(lockedApplication.change()))
return;
Change change = application.change();
if (platformComplete) change = change.withoutPlatform();
if (applicationComplete) change = change.withoutApplication();
applications().store(lockedApplication.withChange(change));
});
}
/**
 * Returns the list of test jobs that should run now, and that need to succeed on the given target for it to be considered tested.
 */
private List<Job> testJobsFor(Application application, State target, String reason, Instant availableSince) {
List<Step> steps = application.deploymentSpec().steps();
// A spec without steps implicitly declares a single test zone.
if (steps.isEmpty()) steps = singletonList(new DeploymentSpec.DeclaredZone(test));
List<Job> jobs = new ArrayList<>();
for (Step step : steps.stream().filter(step -> step.deploysTo(test) || step.deploysTo(staging)).collect(toList())) {
for (JobType jobType : step.zones().stream().map(order::toJob).collect(toList())) {
// Staging additionally requires matching sources; system test only matching targets.
Optional<JobRun> completion = successOn(application, jobType, target)
.filter(run -> jobType != stagingTest || sourcesMatchIfPresent(target, run));
if ( ! completion.isPresent())
jobs.add(deploymentJob(application, target, application.change(), jobType, reason, availableSince, emptySet()));
}
}
return jobs;
}
/** Returns whether the given target is considered tested for this application. */
private boolean isTested(Application application, State target) {
    if (testedAt(application, target).isPresent()) return true;
    return alreadyTriggered(application, target);
}
/**
 * If the given state's sources are present and differ from its targets, returns whether they are
 * equal to those of the given job run; an absent source, or one equal to its target, imposes no constraint.
 */
private static boolean sourcesMatchIfPresent(State target, JobRun jobRun) {
// The platform source constrains only when present and different from the platform target.
return ( ! target.sourcePlatform.filter(version -> ! version.equals(target.targetPlatform)).isPresent()
|| target.sourcePlatform.equals(jobRun.sourcePlatform()))
// The same rule applies to the application source.
&& ( ! target.sourceApplication.filter(version -> ! version.equals(target.targetApplication)).isPresent()
|| target.sourceApplication.equals(jobRun.sourceApplication()));
}
/** Returns whether the given run was made with exactly the target platform and application of the given state. */
private static boolean targetsMatch(State target, JobRun jobRun) {
    if ( ! target.targetPlatform.equals(jobRun.platform())) return false;
    return target.targetApplication.equals(jobRun.application());
}
// Returns the time at which the given target became fully tested: the later of the matching
// system and staging test successes — present only when BOTH tests have succeeded on it.
private Optional<Instant> testedAt(Application application, State target) {
Optional<JobRun> testRun = successOn(application, systemTest, target);
Optional<JobRun> stagingRun = successOn(application, stagingTest, target)
.filter(run -> sourcesMatchIfPresent(target, run));
return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at))
.filter(__ -> testRun.isPresent() && stagingRun.isPresent());
}
/** Returns whether some production job has already been triggered with this target (and matching sources, when given). */
private boolean alreadyTriggered(Application application, State target) {
    for (JobStatus job : application.deploymentJobs().jobStatus().values()) {
        if ( ! job.type().isProduction()) continue;
        boolean triggered = job.lastTriggered()
                               .filter(run -> targetsMatch(target, run))
                               .filter(run -> sourcesMatchIfPresent(target, run))
                               .isPresent();
        if (triggered) return true;
    }
    return false;
}
/**
 * Returns the instant when the given change is complete for the given application for the given job.
 *
 * Any job is complete if the given change is already successful on that job.
 * A production job is also considered complete if its current change is strictly dominated by what
 * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
 * change for the application downgrades the deployment, which is an acknowledgement that the deployed
 * version is broken somehow, such that the job may be locked in failure until a new version is released.
 */
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
State target = targetFor(application, change, deploymentFor(application, jobType));
Optional<JobRun> lastSuccess = successOn(application, jobType, target);
// Non-production jobs only count as complete through an actual success.
if (lastSuccess.isPresent() || ! jobType.isProduction())
return lastSuccess.map(JobRun::at);
// Production fallback: complete when the deployment dominates the change and is being downgraded.
return deploymentFor(application, jobType)
.filter(deployment -> ! ( change.upgrades(deployment.version())
|| change.upgrades(deployment.applicationVersion()))
&& ( application.change().downgrades(deployment.version())
|| application.change().downgrades(deployment.applicationVersion())))
.map(Deployment::at);
}
/** Returns the last success of the given job type whose targets match the given state, if any. */
private Optional<JobRun> successOn(Application application, JobType jobType, State target) {
    return application.deploymentJobs().statusOf(jobType)
                      .flatMap(status -> status.lastSuccess())
                      .filter(run -> targetsMatch(target, run));
}
private boolean canTrigger(Job job) {
Application application = applications().require(job.applicationId());
if (isRunning(application, job.jobType) || jobStateIsAmong(application, job.jobType, disabled))
return false;
if (successOn(application, job.jobType, job.target).filter(run -> sourcesMatchIfPresent(job.target, run)).isPresent())
return false;
if ( ! job.jobType.isProduction())
return true;
if ( ! job.concurrentlyWith.containsAll(runningProductionJobsFor(application)))
return false;
return true;
}
private List<JobType> runningProductionJobsFor(Application application) {
return application.deploymentJobs().jobStatus().keySet().parallelStream()
.filter(job -> job.isProduction())
.filter(job -> isRunning(application, job))
.collect(toList());
}
private ApplicationController applications() {
return controller.applications();
}
private boolean acceptNewApplicationVersion(Application application) {
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
return ! application.changeAt(clock.instant()).platform().isPresent();
}
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
private State targetFor(Application application, Change change, Optional<Deployment> deployment) {
return new State(targetPlatform(application, change, deployment),
targetApplication(application, change, deployment),
deployment.map(Deployment::version),
deployment.map(Deployment::applicationVersion));
}
private static class Job extends BuildJob {
private final Change change;
private final JobType jobType;
private final String reason;
private final Instant availableSince;
private final Collection<JobType> concurrentlyWith;
private final boolean isRetry;
private final boolean isApplicationUpgrade;
private final State target;
private Job(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry) {
super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
this.change = change;
this.jobType = jobType;
this.availableSince = availableSince;
this.concurrentlyWith = concurrentlyWith;
this.reason = reason;
this.isRetry = isRetry;
this.isApplicationUpgrade = change.application().isPresent();
this.target = target;
}
JobType jobType() { return jobType; }
Instant availableSince() { return availableSince; }
boolean isRetry() { return isRetry; }
boolean applicationUpgrade() { return isApplicationUpgrade; }
}
public static class State {
private final Version targetPlatform;
private final ApplicationVersion targetApplication;
private final Optional<Version> sourcePlatform;
private final Optional<ApplicationVersion> sourceApplication;
public State(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
this.targetPlatform = targetPlatform;
this.targetApplication = targetApplication;
this.sourcePlatform = sourcePlatform;
this.sourceApplication = sourceApplication;
}
public Version targetPlatform() { return targetPlatform; }
public ApplicationVersion targetApplication() { return targetApplication; }
public Optional<Version> sourcePlatform() { return sourcePlatform; }
public Optional<ApplicationVersion> sourceApplication() { return sourceApplication; }
@Override
public String toString() {
return String.format("platform %s%s, application %s%s",
targetPlatform,
sourcePlatform.filter(version -> ! version.equals(targetPlatform))
.map(v -> " (from " + v + ")").orElse(""),
targetApplication.id(),
sourceApplication.filter(version -> ! version.equals(targetApplication))
.map(v -> " (from " + v.id() + ")").orElse(""));
}
}
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.order = new DeploymentOrder(controller::system);
this.buildService = buildService;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*/
/**
* Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
*
* Only one job is triggered each run for test jobs, since their environments have limited capacity.
*/
public long triggerReadyJobs() {
return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
.entrySet().stream()
.flatMap(entry -> (entry.getKey()
? entry.getValue().stream()
.sorted(comparing(Job::isRetry)
.thenComparing(Job::applicationUpgrade)
.reversed()
.thenComparing(Job::availableSince))
.collect(groupingBy(Job::jobType))
: entry.getValue().stream()
.collect(groupingBy(Job::applicationId)))
.values().stream()
.map(jobs -> (Supplier<Long>) jobs.stream()
.filter(job -> canTrigger(job) && trigger(job))
.limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
.parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
* Attempts to trigger the given job for the given application and returns the outcome.
*
* If the build service can not find the given job, or claims it is illegal to trigger it,
* the project id is removed from the application owning the job, to prevent further trigger attemps.
*/
public boolean trigger(Job job) {
log.log(LogLevel.INFO, String.format("Attempting to trigger %s: %s (%s)", job, job.reason, job.target));
try {
buildService.trigger(job);
applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(
job.jobType, JobRun.triggering(job.target.targetPlatform, job.target.targetApplication,
job.target.sourcePlatform, job.target.sourceApplication,
job.reason, clock.instant()))));
return true;
}
catch (RuntimeException e) {
log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
applications().lockOrThrow(job.applicationId(), application ->
applications().store(application.withProjectId(OptionalLong.empty())));
return false;
}
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already has an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.filter(__ -> keepApplicationChange)
.map(Change::of)
.orElse(Change.empty())));
});
}
public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
return computeReadyJobs().collect(groupingBy(Job::jobType));
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps. */
private Stream<Job> computeReadyJobs() {
return ApplicationList.from(applications().asList())
.notPullRequest()
.withProjectId()
.deploying()
.idList().stream()
.map(this::computeReadyJobs)
.flatMap(List::stream);
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
public boolean isRunning(Application application, JobType jobType) {
return ! application.deploymentJobs().statusOf(jobType)
.flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
.orElse(false)
&& jobStateIsAmong(application, jobType, running, queued);
}
private boolean jobStateIsAmong(Application application, JobType jobType, JobState state, JobState... states) {
return EnumSet.of(state, states).contains(buildService.stateOf(BuildJob.of(application.id(),
application.deploymentJobs().projectId().getAsLong(),
jobType.jobName())));
}
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType) {
Application application = applications().require(applicationId);
if (jobType == component) {
buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
return singletonList(component);
}
State target = targetFor(application, application.change(), deploymentFor(application, jobType));
String reason = ">:o:< Triggered by force! (-o-) |-o-| (=oo=)";
if ( ! jobType.isProduction() || isTested(application, target)) {
trigger(deploymentJob(application, target, application.change(), jobType, reason, clock.instant(), Collections.emptySet()));
return singletonList(jobType);
}
List<Job> testJobs = testJobsFor(application, target, reason, clock.instant());
testJobs.forEach(this::trigger);
return testJobs.stream().map(Job::jobType).collect(toList());
}
private Job deploymentJob(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
.filter(JobError.outOfCapacity::equals).isPresent();
if (isRetry) reason += "; retrying on out of capacity";
return new Job(application, target, change, jobType, reason, availableSince, concurrentlyWith, isRetry);
}
private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::version), change.platform())
.orElse(application.oldestDeployedPlatform()
.orElse(controller.systemVersion()));
}
private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::applicationVersion), change.application())
.orElse(application.oldestDeployedApplication()
.orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application()));
}
private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
}
/**
* Finds the next step to trigger for the given application, if any, and returns these as a list.
*/
private List<Job> computeReadyJobs(ApplicationId id) {
List<Job> jobs = new ArrayList<>();
applications().get(id).ifPresent(application -> {
List<Step> productionSteps = application.deploymentSpec().steps().stream()
.filter(step -> step.deploysTo(prod) || step.zones().isEmpty())
.collect(toList());
Change change = application.changeAt(clock.instant());
@SuppressWarnings("cast")
Optional<Instant> completedAt = max((Optional<Instant>) application.deploymentJobs().statusOf(systemTest)
.flatMap(job -> job.lastSuccess().map(JobRun::at)),
(Optional<Instant>) application.deploymentJobs().statusOf(stagingTest)
.flatMap(job -> job.lastSuccess().map(JobRun::at)));
String reason = "New change available";
List<Job> testJobs = null;
if (change.isPresent())
for (Step step : productionSteps) {
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(change, application, job)));
if (jobsByCompletion.containsKey(empty())) {
for (JobType job : jobsByCompletion.get(empty())) {
State target = targetFor(application, change, deploymentFor(application, job));
if (isTested(application, target)) {
if (completedAt.isPresent())
jobs.add(deploymentJob(application, target, change, job, reason, completedAt.get(), stepJobs));
}
else if (testJobs == null) {
if ( ! alreadyTriggered(application, target))
testJobs = testJobsFor(application, target, "Testing deployment for " + job.jobName(), completedAt.orElse(clock.instant()));
else
testJobs = emptyList();
}
}
}
else {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
}
if (testJobs == null)
testJobs = testJobsFor(application, targetFor(application, application.change(), empty()), "Testing last changes outside prod", clock.instant());
jobs.addAll(testJobs);
removeCompletedChange(application);
});
return jobs;
}
private void removeCompletedChange(Application application) {
List<JobType> jobs = (application.deploymentSpec().steps().isEmpty()
? singletonList(new DeploymentSpec.DeclaredZone(test))
: application.deploymentSpec().steps()).stream()
.flatMap(step -> step.zones().stream())
.map(order::toJob)
.collect(toList());
boolean platformComplete = application.change().platform().map(Change::of)
.map(change -> jobs.stream().allMatch(job -> completedAt(change, application, job).isPresent()))
.orElse(false);
boolean applicationComplete = application.change().application().map(Change::of)
.map(change -> jobs.stream().allMatch(job -> completedAt(change, application, job).isPresent()))
.orElse(false);
if (platformComplete || applicationComplete)
applications().lockIfPresent(application.id(), lockedApplication -> {
if ( ! application.change().equals(lockedApplication.change()))
return;
Change change = application.change();
if (platformComplete) change = change.withoutPlatform();
if (applicationComplete) change = change.withoutApplication();
applications().store(lockedApplication.withChange(change));
});
}
/**
* Returns the list of test jobs that should run now, and that need to succeed on the given target for it to be considered tested.
*/
private List<Job> testJobsFor(Application application, State target, String reason, Instant availableSince) {
List<Step> steps = application.deploymentSpec().steps();
if (steps.isEmpty()) steps = singletonList(new DeploymentSpec.DeclaredZone(test));
List<Job> jobs = new ArrayList<>();
for (Step step : steps.stream().filter(step -> step.deploysTo(test) || step.deploysTo(staging)).collect(toList())) {
for (JobType jobType : step.zones().stream().map(order::toJob).collect(toList())) {
Optional<JobRun> completion = successOn(application, jobType, target)
.filter(run -> jobType != stagingTest || sourcesMatchIfPresent(target, run));
if ( ! completion.isPresent())
jobs.add(deploymentJob(application, target, application.change(), jobType, reason, availableSince, emptySet()));
}
}
return jobs;
}
private boolean isTested(Application application, State target) {
return testedAt(application, target).isPresent()
|| alreadyTriggered(application, target);
}
/** If the given state's sources are present and differ from its targets, returns whether they are equal to those of the given job run. */
private static boolean sourcesMatchIfPresent(State target, JobRun jobRun) {
return ( ! target.sourcePlatform.filter(version -> ! version.equals(target.targetPlatform)).isPresent()
|| target.sourcePlatform.equals(jobRun.sourcePlatform()))
&& ( ! target.sourceApplication.filter(version -> ! version.equals(target.targetApplication)).isPresent()
|| target.sourceApplication.equals(jobRun.sourceApplication()));
}
private static boolean targetsMatch(State target, JobRun jobRun) {
return target.targetPlatform.equals(jobRun.platform()) && target.targetApplication.equals(jobRun.application());
}
private Optional<Instant> testedAt(Application application, State target) {
Optional<JobRun> testRun = successOn(application, systemTest, target);
Optional<JobRun> stagingRun = successOn(application, stagingTest, target)
.filter(run -> sourcesMatchIfPresent(target, run));
return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at))
.filter(__ -> testRun.isPresent() && stagingRun.isPresent());
}
private boolean alreadyTriggered(Application application, State target) {
return application.deploymentJobs().jobStatus().values().stream()
.filter(job -> job.type().isProduction())
.anyMatch(job -> job.lastTriggered()
.filter(run -> targetsMatch(target, run))
.filter(run -> sourcesMatchIfPresent(target, run))
.isPresent());
}
/**
* Returns the instant when the given change is complete for the given application for the given job.
*
* Any job is complete if the given change is already successful on that job.
* A production job is also considered complete if its current change is strictly dominated by what
* is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
* change for the application downgrades the deployment, which is an acknowledgement that the deployed
* version is broken somehow, such that the job may be locked in failure until a new version is released.
*/
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
State target = targetFor(application, change, deploymentFor(application, jobType));
Optional<JobRun> lastSuccess = successOn(application, jobType, target);
if (lastSuccess.isPresent() || ! jobType.isProduction())
return lastSuccess.map(JobRun::at);
return deploymentFor(application, jobType)
.filter(deployment -> ! ( change.upgrades(deployment.version())
|| change.upgrades(deployment.applicationVersion()))
&& ( application.change().downgrades(deployment.version())
|| application.change().downgrades(deployment.applicationVersion())))
.map(Deployment::at);
}
private Optional<JobRun> successOn(Application application, JobType jobType, State target) {
return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
.filter(run -> targetsMatch(target, run));
}
private boolean canTrigger(Job job) {
Application application = applications().require(job.applicationId());
if (isRunning(application, job.jobType) || jobStateIsAmong(application, job.jobType, disabled))
return false;
if (successOn(application, job.jobType, job.target).filter(run -> sourcesMatchIfPresent(job.target, run)).isPresent())
return false;
if ( ! job.jobType.isProduction())
return true;
if ( ! job.concurrentlyWith.containsAll(runningProductionJobsFor(application)))
return false;
return true;
}
private List<JobType> runningProductionJobsFor(Application application) {
return application.deploymentJobs().jobStatus().keySet().parallelStream()
.filter(job -> job.isProduction())
.filter(job -> isRunning(application, job))
.collect(toList());
}
private ApplicationController applications() {
return controller.applications();
}
private boolean acceptNewApplicationVersion(Application application) {
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
return ! application.changeAt(clock.instant()).platform().isPresent();
}
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
private State targetFor(Application application, Change change, Optional<Deployment> deployment) {
return new State(targetPlatform(application, change, deployment),
targetApplication(application, change, deployment),
deployment.map(Deployment::version),
deployment.map(Deployment::applicationVersion));
}
private static class Job extends BuildJob {
private final Change change;
private final JobType jobType;
private final String reason;
private final Instant availableSince;
private final Collection<JobType> concurrentlyWith;
private final boolean isRetry;
private final boolean isApplicationUpgrade;
private final State target;
private Job(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry) {
super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
this.change = change;
this.jobType = jobType;
this.availableSince = availableSince;
this.concurrentlyWith = concurrentlyWith;
this.reason = reason;
this.isRetry = isRetry;
this.isApplicationUpgrade = change.application().isPresent();
this.target = target;
}
JobType jobType() { return jobType; }
Instant availableSince() { return availableSince; }
boolean isRetry() { return isRetry; }
boolean applicationUpgrade() { return isApplicationUpgrade; }
}
public static class State {
private final Version targetPlatform;
private final ApplicationVersion targetApplication;
private final Optional<Version> sourcePlatform;
private final Optional<ApplicationVersion> sourceApplication;
public State(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
this.targetPlatform = targetPlatform;
this.targetApplication = targetApplication;
this.sourcePlatform = sourcePlatform;
this.sourceApplication = sourceApplication;
}
public Version targetPlatform() { return targetPlatform; }
public ApplicationVersion targetApplication() { return targetApplication; }
public Optional<Version> sourcePlatform() { return sourcePlatform; }
public Optional<ApplicationVersion> sourceApplication() { return sourceApplication; }
@Override
public String toString() {
return String.format("platform %s%s, application %s%s",
targetPlatform,
sourcePlatform.filter(version -> ! version.equals(targetPlatform))
.map(v -> " (from " + v + ")").orElse(""),
targetApplication.id(),
sourceApplication.filter(version -> ! version.equals(targetApplication))
.map(v -> " (from " + v.id() + ")").orElse(""));
}
}
} |
Will git ever learn to diff indentation better? >_< | private List<Job> computeReadyJobs(ApplicationId id) {
List<Job> jobs = new ArrayList<>();
applications().get(id).ifPresent(application -> {
List<Step> productionSteps = application.deploymentSpec().steps().stream()
.filter(step -> step.deploysTo(prod) || step.zones().isEmpty())
.collect(toList());
Change change = application.changeAt(clock.instant());
@SuppressWarnings("cast")
Optional<Instant> completedAt = max((Optional<Instant>) application.deploymentJobs().statusOf(systemTest)
.flatMap(job -> job.lastSuccess().map(JobRun::at)),
(Optional<Instant>) application.deploymentJobs().statusOf(stagingTest)
.flatMap(job -> job.lastSuccess().map(JobRun::at)));
String reason = "New change available";
List<Job> testJobs = null;
if (change.isPresent())
for (Step step : productionSteps) {
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(change, application, job)));
if (jobsByCompletion.containsKey(empty())) {
for (JobType job : jobsByCompletion.get(empty())) {
State target = targetFor(application, change, deploymentFor(application, job));
if (isTested(application, target)) {
if (completedAt.isPresent())
jobs.add(deploymentJob(application, target, change, job, reason, completedAt.get(), stepJobs));
}
else if (testJobs == null) {
if ( ! alreadyTriggered(application, target))
testJobs = testJobsFor(application, target, "Testing deployment for " + job.jobName(), completedAt.orElse(clock.instant()));
else
testJobs = emptyList();
}
}
}
else {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
}
if (testJobs == null)
testJobs = testJobsFor(application, targetFor(application, application.change(), empty()), "Testing last changes outside prod", clock.instant());
jobs.addAll(testJobs);
removeCompletedChange(application);
});
return jobs;
} | } | private List<Job> computeReadyJobs(ApplicationId id) {
List<Job> jobs = new ArrayList<>();
applications().get(id).ifPresent(application -> {
List<Step> productionSteps = application.deploymentSpec().steps().stream()
.filter(step -> step.deploysTo(prod) || step.zones().isEmpty())
.collect(toList());
Change change = application.changeAt(clock.instant());
@SuppressWarnings("cast")
Optional<Instant> completedAt = max((Optional<Instant>) application.deploymentJobs().statusOf(systemTest)
.flatMap(job -> job.lastSuccess().map(JobRun::at)),
(Optional<Instant>) application.deploymentJobs().statusOf(stagingTest)
.flatMap(job -> job.lastSuccess().map(JobRun::at)));
String reason = "New change available";
List<Job> testJobs = null;
if (change.isPresent())
for (Step step : productionSteps) {
Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(change, application, job)));
if (jobsByCompletion.containsKey(empty())) {
for (JobType job : jobsByCompletion.get(empty())) {
State target = targetFor(application, change, deploymentFor(application, job));
if (isTested(application, target)) {
if (completedAt.isPresent())
jobs.add(deploymentJob(application, target, change, job, reason, completedAt.get(), stepJobs));
}
else if (testJobs == null) {
if ( ! alreadyTriggered(application, target))
testJobs = testJobsFor(application, target, "Testing deployment for " + job.jobName(), completedAt.orElse(clock.instant()));
else
testJobs = emptyList();
}
}
}
else {
if (stepJobs.isEmpty()) {
Duration delay = ((DeploymentSpec.Delay) step).duration();
completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
reason += " after a delay of " + delay;
}
else {
completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
}
}
}
if (testJobs == null)
testJobs = testJobsFor(application, targetFor(application, application.change(), empty()), "Testing last changes outside prod", clock.instant());
jobs.addAll(testJobs);
removeCompletedChange(application);
});
return jobs;
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.order = new DeploymentOrder(controller::system);
this.buildService = buildService;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*/
public void notifyOfCompletion(JobReport report) {
log.log(LogLevel.INFO, String.format("Got notified of %s for %s of %s (%d).",
report.jobError().map(JobError::toString).orElse("success"),
report.jobType(),
report.applicationId(),
report.projectId()));
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
JobRun triggering;
if (report.jobType() == component) {
ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, empty(), empty(), "Application commit", clock.instant());
if (report.success()) {
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion))
.withOutstandingChange(Change.empty());
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
}
else triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered)
.orElseThrow(() -> new IllegalStateException("Got notified about completion of " + report.jobType().jobName() + " for " +
report.applicationId() + ", but that has neither been triggered nor deployed"));
applications().store(application.withJobCompletion(report.projectId(),
report.jobType(),
triggering.completion(report.buildNumber(), clock.instant()),
report.jobError()));
});
}
/**
* Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
*
* Only one job is triggered each run for test jobs, since their environments have limited capacity.
*/
public long triggerReadyJobs() {
return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
.entrySet().stream()
.flatMap(entry -> (entry.getKey()
? entry.getValue().stream()
.sorted(comparing(Job::isRetry)
.thenComparing(Job::applicationUpgrade)
.reversed()
.thenComparing(Job::availableSince))
.collect(groupingBy(Job::jobType))
: entry.getValue().stream()
.collect(groupingBy(Job::applicationId)))
.values().stream()
.map(jobs -> (Supplier<Long>) jobs.stream()
.filter(job -> canTrigger(job) && trigger(job))
.limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
.parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
* Attempts to trigger the given job for the given application and returns the outcome.
*
* If the build service can not find the given job, or claims it is illegal to trigger it,
* the project id is removed from the application owning the job, to prevent further trigger attemps.
*/
public boolean trigger(Job job) {
log.log(LogLevel.INFO, String.format("Attempting to trigger %s: %s (%s)", job, job.reason, job.target));
try {
buildService.trigger(job);
applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(
job.jobType, JobRun.triggering(job.target.targetPlatform, job.target.targetApplication,
job.target.sourcePlatform, job.target.sourceApplication,
job.reason, clock.instant()))));
return true;
}
catch (RuntimeException e) {
log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
applications().lockOrThrow(job.applicationId(), application ->
applications().store(application.withProjectId(OptionalLong.empty())));
return false;
}
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already has an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
    applications().lockOrThrow(applicationId, locked -> {
        // A change which still has no failures is considered in progress, and must not be replaced.
        boolean changeInProgress = locked.change().isPresent() && ! locked.deploymentJobs().hasFailures();
        if (changeInProgress)
            throw new IllegalArgumentException("Could not start " + change + " on " + locked + ": " +
                                               locked.change() + " is already in progress");
        LockedApplication updated = locked.withChange(change);
        // Starting an application change consumes any outstanding one.
        if (change.application().isPresent())
            updated = updated.withOutstandingChange(Change.empty());
        applications().store(updated);
    });
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
    applications().lockOrThrow(applicationId, locked -> {
        // Keep only the application part of the change, and only when asked to; otherwise clear it entirely.
        Change remaining = keepApplicationChange
                ? locked.change().application().map(Change::of).orElse(Change.empty())
                : Change.empty();
        applications().store(locked.withChange(remaining));
    });
}
/** Returns all jobs which are currently ready to run, grouped by their job type. */
public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
    return computeReadyJobs().collect(groupingBy(Job::jobType));
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps. */
private Stream<Job> computeReadyJobs() {
    return ApplicationList.from(applications().asList())
                          .notPullRequest()  // Exclude pull-request instances.
                          .withProjectId()   // Only applications with a known build project can be triggered.
                          .deploying()       // Only applications with a change in progress have jobs to run.
                          .idList().stream()
                          // Delegates to the per-application overload of this method, defined elsewhere in this file.
                          .map(this::computeReadyJobs)
                          .flatMap(List::stream);
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
public boolean isRunning(Application application, JobType jobType) {
    // If the job has completed after it was last triggered, it is not running, regardless of build service state.
    // NOTE(review): lastTriggered().get() assumes a completion always implies an earlier triggering — confirm this invariant.
    return ! application.deploymentJobs().statusOf(jobType)
                        .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                        .orElse(false)
           && jobStateIsAmong(application, jobType, running, queued);
}
/** Returns whether the build service reports the given job of the given application to be in one of the given states. */
private boolean jobStateIsAmong(Application application, JobType jobType, JobState state, JobState... states) {
    BuildJob buildJob = BuildJob.of(application.id(),
                                    application.deploymentJobs().projectId().getAsLong(),
                                    jobType.jobName());
    JobState current = buildService.stateOf(buildJob);
    return EnumSet.of(state, states).contains(current);
}
/**
 * Triggers the given job type for the given application, bypassing the usual readiness checks —
 * except that untested production targets first get their required test jobs triggered instead.
 * Returns the job types actually triggered.
 */
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType) {
    Application application = applications().require(applicationId);
    // The component (build) job is triggered directly in the build service; it has no deployment target.
    if (jobType == component) {
        buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
        return singletonList(component);
    }
    State target = targetFor(application, application.change(), deploymentFor(application, jobType));
    String reason = ">:o:< Triggered by force! (-o-) |-o-| (=oo=)";
    // Non-production jobs, and production jobs whose target is already tested, are triggered directly.
    if ( ! jobType.isProduction() || isTested(application, target)) {
        trigger(deploymentJob(application, target, application.change(), jobType, reason, clock.instant(), Collections.emptySet()));
        return singletonList(jobType);
    }
    // Otherwise, trigger the test jobs which must succeed before this production job may run.
    List<Job> testJobs = testJobsFor(application, target, reason, clock.instant());
    testJobs.forEach(this::trigger);
    return testJobs.stream().map(Job::jobType).collect(toList());
}
/** Creates a deployment job for the given target, flagged as a retry when its last run failed on out of capacity. */
private Job deploymentJob(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
    boolean isRetry = application.deploymentJobs().statusOf(jobType)
                                 .flatMap(JobStatus::jobError)
                                 .filter(JobError.outOfCapacity::equals)
                                 .isPresent();
    String jobReason = isRetry ? reason + "; retrying on out of capacity" : reason;
    return new Job(application, target, change, jobType, jobReason, availableSince, concurrentlyWith, isRetry);
}
/**
 * Returns the platform version to target: the highest of the currently deployed version and the
 * change's version, falling back to the application's oldest deployed platform, or the system version.
 */
private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
    return max(deployment.map(Deployment::version), change.platform())
            .orElse(application.oldestDeployedPlatform()
                               .orElse(controller.systemVersion()));
}
/**
 * Returns the application version to target: the highest of the currently deployed version and the
 * change's version, falling back to the oldest deployed version, or the last successful build.
 */
private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
    // NOTE(review): the final fallback assumes a successful component run exists — this throws if the
    // application has never built successfully; confirm callers guarantee this.
    return max(deployment.map(Deployment::applicationVersion), change.application())
            .orElse(application.oldestDeployedApplication()
                               .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application()));
}
/** Returns the greater of the two given optionals, or the one which is present; ties go to the first. */
private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
    if ( ! o1.isPresent()) return o2;
    if ( ! o2.isPresent()) return o1;
    return o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
}
/**
 * Removes the completed parts (platform and/or application) of the given application's current change,
 * once every job declared in its deployment spec has completed them.
 */
private void removeCompletedChange(Application application) {
    // All jobs the application's deployment spec declares; an empty spec implies just the test job.
    List<JobType> jobs = (application.deploymentSpec().steps().isEmpty()
            ? singletonList(new DeploymentSpec.DeclaredZone(test))
            : application.deploymentSpec().steps()).stream()
                                                   .flatMap(step -> step.zones().stream())
                                                   .map(order::toJob)
                                                   .collect(toList());
    // Each part of the change is complete only when every declared job has completed it.
    boolean platformComplete = application.change().platform().map(Change::of)
                                          .map(change -> jobs.stream().allMatch(job -> completedAt(change, application, job).isPresent()))
                                          .orElse(false);
    boolean applicationComplete = application.change().application().map(Change::of)
                                             .map(change -> jobs.stream().allMatch(job -> completedAt(change, application, job).isPresent()))
                                             .orElse(false);
    if (platformComplete || applicationComplete)
        applications().lockIfPresent(application.id(), lockedApplication -> {
            // Abort if the change was replaced while completion was being computed, to avoid removing the wrong change.
            if ( ! application.change().equals(lockedApplication.change()))
                return;
            Change change = application.change();
            if (platformComplete) change = change.withoutPlatform();
            if (applicationComplete) change = change.withoutApplication();
            applications().store(lockedApplication.withChange(change));
        });
}
/**
* Returns the list of test jobs that should run now, and that need to succeed on the given target for it to be considered tested.
*/
private List<Job> testJobsFor(Application application, State target, String reason, Instant availableSince) {
    List<Step> steps = application.deploymentSpec().steps();
    // An empty deployment spec implies a single test step.
    if (steps.isEmpty()) steps = singletonList(new DeploymentSpec.DeclaredZone(test));
    List<Job> jobs = new ArrayList<>();
    for (Step step : steps.stream().filter(step -> step.deploysTo(test) || step.deploysTo(staging)).collect(toList())) {
        for (JobType jobType : step.zones().stream().map(order::toJob).collect(toList())) {
            // A test job still needs to run if it has no success on this target — for staging,
            // a success must additionally have matching sources.
            Optional<JobRun> completion = successOn(application, jobType, target)
                    .filter(run -> jobType != stagingTest || sourcesMatchIfPresent(target, run));
            if ( ! completion.isPresent())
                jobs.add(deploymentJob(application, target, application.change(), jobType, reason, availableSince, emptySet()));
        }
    }
    return jobs;
}
/** Returns whether the given target is considered tested: its tests have succeeded, or a production job was already triggered towards it. */
private boolean isTested(Application application, State target) {
    if (testedAt(application, target).isPresent())
        return true;
    return alreadyTriggered(application, target);
}
/** If the given state's sources are present and differ from its targets, returns whether they are equal to those of the given job run. */
private static boolean sourcesMatchIfPresent(State target, JobRun jobRun) {
    // For each of platform and application: either the target has no distinct source (source absent,
    // or equal to the target), or the job run's source must equal the target's source.
    return ( ! target.sourcePlatform.filter(version -> ! version.equals(target.targetPlatform)).isPresent()
             || target.sourcePlatform.equals(jobRun.sourcePlatform()))
           && ( ! target.sourceApplication.filter(version -> ! version.equals(target.targetApplication)).isPresent()
                || target.sourceApplication.equals(jobRun.sourceApplication()));
}
/** Returns whether the given job run was made towards exactly the given target's platform and application versions. */
private static boolean targetsMatch(State target, JobRun jobRun) {
    boolean samePlatform = target.targetPlatform.equals(jobRun.platform());
    boolean sameApplication = target.targetApplication.equals(jobRun.application());
    return samePlatform && sameApplication;
}
/** Returns the instant at which the given target was last verified by both the system and staging tests, if both have succeeded on it. */
private Optional<Instant> testedAt(Application application, State target) {
    Optional<JobRun> testRun = successOn(application, systemTest, target);
    // A staging success must additionally have sources matching the target's, when these are present.
    Optional<JobRun> stagingRun = successOn(application, stagingTest, target)
            .filter(run -> sourcesMatchIfPresent(target, run));
    // The later of the two success instants is returned, and only when both tests have succeeded.
    return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at))
            .filter(__ -> testRun.isPresent() && stagingRun.isPresent());
}
/** Returns whether any production job of the given application was last triggered towards the given target, with matching sources. */
private boolean alreadyTriggered(Application application, State target) {
    for (JobStatus status : application.deploymentJobs().jobStatus().values()) {
        if ( ! status.type().isProduction())
            continue;
        Optional<JobRun> lastRun = status.lastTriggered()
                                         .filter(run -> targetsMatch(target, run))
                                         .filter(run -> sourcesMatchIfPresent(target, run));
        if (lastRun.isPresent())
            return true;
    }
    return false;
}
/**
* Returns the instant when the given change is complete for the given application for the given job.
*
* Any job is complete if the given change is already successful on that job.
* A production job is also considered complete if its current change is strictly dominated by what
* is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
* change for the application downgrades the deployment, which is an acknowledgement that the deployed
* version is broken somehow, such that the job may be locked in failure until a new version is released.
*/
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
    State target = targetFor(application, change, deploymentFor(application, jobType));
    Optional<JobRun> lastSuccess = successOn(application, jobType, target);
    // Non-production jobs are complete only on success; production jobs may also be complete by domination, below.
    if (lastSuccess.isPresent() || ! jobType.isProduction())
        return lastSuccess.map(JobRun::at);
    // Domination: nothing in the given change upgrades the deployment, while the application's full current
    // change downgrades it — i.e., the deployed version has been abandoned as broken (see javadoc above).
    return deploymentFor(application, jobType)
            .filter(deployment -> ! ( change.upgrades(deployment.version())
                                      || change.upgrades(deployment.applicationVersion()))
                                  && ( application.change().downgrades(deployment.version())
                                       || application.change().downgrades(deployment.applicationVersion())))
            .map(Deployment::at);
}
/** Returns the last success of the given job for the given application, if that success was towards the given target. */
private Optional<JobRun> successOn(Application application, JobType jobType, State target) {
    return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                      .filter(run -> targetsMatch(target, run));
}
/**
 * Returns whether the given job may be triggered now: it must not be running or disabled, must not
 * already have succeeded on its target, and a production job may only run concurrently with the
 * production jobs it was explicitly grouped with.
 */
private boolean canTrigger(Job job) {
    Application application = applications().require(job.applicationId());
    if (isRunning(application, job.jobType) || jobStateIsAmong(application, job.jobType, disabled))
        return false;
    // Already successful on this target, with matching sources: nothing to do.
    if (successOn(application, job.jobType, job.target).filter(run -> sourcesMatchIfPresent(job.target, run)).isPresent())
        return false;
    if ( ! job.jobType.isProduction())
        return true;
    // Production jobs may only run concurrently with the jobs they were explicitly computed together with.
    return job.concurrentlyWith.containsAll(runningProductionJobsFor(application));
}
/** Returns the production jobs of the given application which are currently running. */
private List<JobType> runningProductionJobsFor(Application application) {
    // NOTE(review): parallelStream() fans isRunning() — which may query the build service — out on the
    // common fork-join pool; confirm this parallelism over blocking calls is intentional.
    return application.deploymentJobs().jobStatus().keySet().parallelStream()
                      .filter(job -> job.isProduction())
                      .filter(job -> isRunning(application, job))
                      .collect(toList());
}
/** Convenience accessor for the controller's application repository. */
private ApplicationController applications() {
    return controller.applications();
}
/**
 * Returns whether a newly built application version should become the current change: always when an
 * application change is already underway or jobs are failing, and otherwise only when no platform
 * change is active at this instant.
 */
private boolean acceptNewApplicationVersion(Application application) {
    boolean applicationChangeUnderway = application.change().application().isPresent();
    boolean hasFailures = application.deploymentJobs().hasFailures();
    if (applicationChangeUnderway || hasFailures)
        return true;
    return ! application.changeAt(clock.instant()).platform().isPresent();
}
/** Returns the deployment of the given application in the zone of the given job type, if any. */
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
    // NOTE(review): zone(...).get() assumes the job type maps to a zone in this system — confirm callers
    // never pass a zoneless type (e.g. component) here.
    return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
/** Computes the state to trigger towards: target versions from the change, and currently deployed versions as sources, when a deployment exists. */
private State targetFor(Application application, Change change, Optional<Deployment> deployment) {
    return new State(targetPlatform(application, change, deployment),
                     targetApplication(application, change, deployment),
                     deployment.map(Deployment::version),
                     deployment.map(Deployment::applicationVersion));
}
/** A build job decorated with the deployment state it targets, and the scheduling metadata used to order and filter triggering. */
private static class Job extends BuildJob {
    private final Change change;                        // The change this job helps effectuate.
    private final JobType jobType;
    private final String reason;                        // Human-readable explanation of why the job is triggered.
    private final Instant availableSince;               // When this job became ready to run; used for ordering test jobs.
    private final Collection<JobType> concurrentlyWith; // Production jobs this job is allowed to run alongside.
    private final boolean isRetry;                      // Whether this is a retry after an out-of-capacity failure.
    private final boolean isApplicationUpgrade;         // Derived from the change; used to prioritise test jobs.
    private final State target;                         // The versions this job will deploy, and their sources.
    private Job(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry) {
        super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
        this.change = change;
        this.jobType = jobType;
        this.availableSince = availableSince;
        this.concurrentlyWith = concurrentlyWith;
        this.reason = reason;
        this.isRetry = isRetry;
        this.isApplicationUpgrade = change.application().isPresent();
        this.target = target;
    }
    JobType jobType() { return jobType; }
    Instant availableSince() { return availableSince; }
    boolean isRetry() { return isRetry; }
    boolean applicationUpgrade() { return isApplicationUpgrade; }
}
/** An immutable description of the platform and application versions a job deploys to, and, when a deployment exists, the versions it deploys from. */
public static class State {
    private final Version targetPlatform;
    private final ApplicationVersion targetApplication;
    private final Optional<Version> sourcePlatform;               // Platform deployed when this state was computed, if any.
    private final Optional<ApplicationVersion> sourceApplication; // Application deployed when this state was computed, if any.
    public State(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
        this.targetPlatform = targetPlatform;
        this.targetApplication = targetApplication;
        this.sourcePlatform = sourcePlatform;
        this.sourceApplication = sourceApplication;
    }
    public Version targetPlatform() { return targetPlatform; }
    public ApplicationVersion targetApplication() { return targetApplication; }
    public Optional<Version> sourcePlatform() { return sourcePlatform; }
    public Optional<ApplicationVersion> sourceApplication() { return sourceApplication; }
    @Override
    public String toString() {
        // Sources are shown only when they differ from the corresponding targets.
        return String.format("platform %s%s, application %s%s",
                             targetPlatform,
                             sourcePlatform.filter(version -> ! version.equals(targetPlatform))
                                           .map(v -> " (from " + v + ")").orElse(""),
                             targetApplication.id(),
                             sourceApplication.filter(version -> ! version.equals(targetApplication))
                                              .map(v -> " (from " + v.id() + ")").orElse(""));
    }
}
} | class DeploymentTrigger {
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
private final Clock clock;
private final DeploymentOrder order;
private final BuildService buildService;
public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
Objects.requireNonNull(controller, "controller cannot be null");
Objects.requireNonNull(curator, "curator cannot be null");
Objects.requireNonNull(clock, "clock cannot be null");
this.controller = controller;
this.clock = clock;
this.order = new DeploymentOrder(controller::system);
this.buildService = buildService;
}
public DeploymentOrder deploymentOrder() {
return order;
}
/**
* Called each time a job completes (successfully or not) to record information used when deciding what to trigger.
*/
public void notifyOfCompletion(JobReport report) {
log.log(LogLevel.INFO, String.format("Got notified of %s for %s of %s (%d).",
report.jobError().map(JobError::toString).orElse("success"),
report.jobType(),
report.applicationId(),
report.projectId()));
if ( ! applications().get(report.applicationId()).isPresent()) {
log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
"': Unknown application '" + report.applicationId() + "'");
return;
}
applications().lockOrThrow(report.applicationId(), application -> {
JobRun triggering;
if (report.jobType() == component) {
ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, empty(), empty(), "Application commit", clock.instant());
if (report.success()) {
if (acceptNewApplicationVersion(application))
application = application.withChange(application.change().with(applicationVersion))
.withOutstandingChange(Change.empty());
else
application = application.withOutstandingChange(Change.of(applicationVersion));
}
}
else triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered)
.orElseThrow(() -> new IllegalStateException("Got notified about completion of " + report.jobType().jobName() + " for " +
report.applicationId() + ", but that has neither been triggered nor deployed"));
applications().store(application.withJobCompletion(report.projectId(),
report.jobType(),
triggering.completion(report.buildNumber(), clock.instant()),
report.jobError()));
});
}
/**
* Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
*
* Only one job is triggered each run for test jobs, since their environments have limited capacity.
*/
public long triggerReadyJobs() {
return computeReadyJobs().collect(partitioningBy(job -> job.jobType().isTest()))
.entrySet().stream()
.flatMap(entry -> (entry.getKey()
? entry.getValue().stream()
.sorted(comparing(Job::isRetry)
.thenComparing(Job::applicationUpgrade)
.reversed()
.thenComparing(Job::availableSince))
.collect(groupingBy(Job::jobType))
: entry.getValue().stream()
.collect(groupingBy(Job::applicationId)))
.values().stream()
.map(jobs -> (Supplier<Long>) jobs.stream()
.filter(job -> canTrigger(job) && trigger(job))
.limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
.parallel().map(Supplier::get).reduce(0L, Long::sum);
}
/**
* Attempts to trigger the given job for the given application and returns the outcome.
*
* If the build service can not find the given job, or claims it is illegal to trigger it,
 * the project id is removed from the application owning the job, to prevent further trigger attempts.
*/
public boolean trigger(Job job) {
log.log(LogLevel.INFO, String.format("Attempting to trigger %s: %s (%s)", job, job.reason, job.target));
try {
buildService.trigger(job);
applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(
job.jobType, JobRun.triggering(job.target.targetPlatform, job.target.targetApplication,
job.target.sourcePlatform, job.target.sourceApplication,
job.reason, clock.instant()))));
return true;
}
catch (RuntimeException e) {
log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
applications().lockOrThrow(job.applicationId(), application ->
applications().store(application.withProjectId(OptionalLong.empty())));
return false;
}
}
/**
* Triggers a change of this application
*
* @param applicationId the application to trigger
* @throws IllegalArgumentException if this application already has an ongoing change
*/
public void triggerChange(ApplicationId applicationId, Change change) {
applications().lockOrThrow(applicationId, application -> {
if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
application.change() + " is already in progress");
application = application.withChange(change);
if (change.application().isPresent())
application = application.withOutstandingChange(Change.empty());
applications().store(application);
});
}
/** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
applications().lockOrThrow(applicationId, application -> {
applications().store(application.withChange(application.change().application()
.filter(__ -> keepApplicationChange)
.map(Change::of)
.orElse(Change.empty())));
});
}
public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
return computeReadyJobs().collect(groupingBy(Job::jobType));
}
/** Returns the set of all jobs which have changes to propagate from the upstream steps. */
private Stream<Job> computeReadyJobs() {
return ApplicationList.from(applications().asList())
.notPullRequest()
.withProjectId()
.deploying()
.idList().stream()
.map(this::computeReadyJobs)
.flatMap(List::stream);
}
/** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
public boolean isRunning(Application application, JobType jobType) {
return ! application.deploymentJobs().statusOf(jobType)
.flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
.orElse(false)
&& jobStateIsAmong(application, jobType, running, queued);
}
private boolean jobStateIsAmong(Application application, JobType jobType, JobState state, JobState... states) {
return EnumSet.of(state, states).contains(buildService.stateOf(BuildJob.of(application.id(),
application.deploymentJobs().projectId().getAsLong(),
jobType.jobName())));
}
public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType) {
Application application = applications().require(applicationId);
if (jobType == component) {
buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
return singletonList(component);
}
State target = targetFor(application, application.change(), deploymentFor(application, jobType));
String reason = ">:o:< Triggered by force! (-o-) |-o-| (=oo=)";
if ( ! jobType.isProduction() || isTested(application, target)) {
trigger(deploymentJob(application, target, application.change(), jobType, reason, clock.instant(), Collections.emptySet()));
return singletonList(jobType);
}
List<Job> testJobs = testJobsFor(application, target, reason, clock.instant());
testJobs.forEach(this::trigger);
return testJobs.stream().map(Job::jobType).collect(toList());
}
private Job deploymentJob(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith) {
boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
.filter(JobError.outOfCapacity::equals).isPresent();
if (isRetry) reason += "; retrying on out of capacity";
return new Job(application, target, change, jobType, reason, availableSince, concurrentlyWith, isRetry);
}
private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::version), change.platform())
.orElse(application.oldestDeployedPlatform()
.orElse(controller.systemVersion()));
}
private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
return max(deployment.map(Deployment::applicationVersion), change.application())
.orElse(application.oldestDeployedApplication()
.orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application()));
}
private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
}
/**
 * Removes the completed parts (platform and/or application) of the given application's current change,
 * once every job declared in its deployment spec has completed them.
 */
private void removeCompletedChange(Application application) {
List<JobType> jobs = (application.deploymentSpec().steps().isEmpty()
? singletonList(new DeploymentSpec.DeclaredZone(test))
: application.deploymentSpec().steps()).stream()
.flatMap(step -> step.zones().stream())
.map(order::toJob)
.collect(toList());
boolean platformComplete = application.change().platform().map(Change::of)
.map(change -> jobs.stream().allMatch(job -> completedAt(change, application, job).isPresent()))
.orElse(false);
boolean applicationComplete = application.change().application().map(Change::of)
.map(change -> jobs.stream().allMatch(job -> completedAt(change, application, job).isPresent()))
.orElse(false);
if (platformComplete || applicationComplete)
applications().lockIfPresent(application.id(), lockedApplication -> {
if ( ! application.change().equals(lockedApplication.change()))
return;
Change change = application.change();
if (platformComplete) change = change.withoutPlatform();
if (applicationComplete) change = change.withoutApplication();
applications().store(lockedApplication.withChange(change));
});
}
/**
* Returns the list of test jobs that should run now, and that need to succeed on the given target for it to be considered tested.
*/
private List<Job> testJobsFor(Application application, State target, String reason, Instant availableSince) {
List<Step> steps = application.deploymentSpec().steps();
if (steps.isEmpty()) steps = singletonList(new DeploymentSpec.DeclaredZone(test));
List<Job> jobs = new ArrayList<>();
for (Step step : steps.stream().filter(step -> step.deploysTo(test) || step.deploysTo(staging)).collect(toList())) {
for (JobType jobType : step.zones().stream().map(order::toJob).collect(toList())) {
Optional<JobRun> completion = successOn(application, jobType, target)
.filter(run -> jobType != stagingTest || sourcesMatchIfPresent(target, run));
if ( ! completion.isPresent())
jobs.add(deploymentJob(application, target, application.change(), jobType, reason, availableSince, emptySet()));
}
}
return jobs;
}
private boolean isTested(Application application, State target) {
return testedAt(application, target).isPresent()
|| alreadyTriggered(application, target);
}
/** If the given state's sources are present and differ from its targets, returns whether they are equal to those of the given job run. */
private static boolean sourcesMatchIfPresent(State target, JobRun jobRun) {
return ( ! target.sourcePlatform.filter(version -> ! version.equals(target.targetPlatform)).isPresent()
|| target.sourcePlatform.equals(jobRun.sourcePlatform()))
&& ( ! target.sourceApplication.filter(version -> ! version.equals(target.targetApplication)).isPresent()
|| target.sourceApplication.equals(jobRun.sourceApplication()));
}
private static boolean targetsMatch(State target, JobRun jobRun) {
return target.targetPlatform.equals(jobRun.platform()) && target.targetApplication.equals(jobRun.application());
}
private Optional<Instant> testedAt(Application application, State target) {
Optional<JobRun> testRun = successOn(application, systemTest, target);
Optional<JobRun> stagingRun = successOn(application, stagingTest, target)
.filter(run -> sourcesMatchIfPresent(target, run));
return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at))
.filter(__ -> testRun.isPresent() && stagingRun.isPresent());
}
private boolean alreadyTriggered(Application application, State target) {
return application.deploymentJobs().jobStatus().values().stream()
.filter(job -> job.type().isProduction())
.anyMatch(job -> job.lastTriggered()
.filter(run -> targetsMatch(target, run))
.filter(run -> sourcesMatchIfPresent(target, run))
.isPresent());
}
/**
* Returns the instant when the given change is complete for the given application for the given job.
*
* Any job is complete if the given change is already successful on that job.
* A production job is also considered complete if its current change is strictly dominated by what
* is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
* change for the application downgrades the deployment, which is an acknowledgement that the deployed
* version is broken somehow, such that the job may be locked in failure until a new version is released.
*/
private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
State target = targetFor(application, change, deploymentFor(application, jobType));
Optional<JobRun> lastSuccess = successOn(application, jobType, target);
if (lastSuccess.isPresent() || ! jobType.isProduction())
return lastSuccess.map(JobRun::at);
return deploymentFor(application, jobType)
.filter(deployment -> ! ( change.upgrades(deployment.version())
|| change.upgrades(deployment.applicationVersion()))
&& ( application.change().downgrades(deployment.version())
|| application.change().downgrades(deployment.applicationVersion())))
.map(Deployment::at);
}
private Optional<JobRun> successOn(Application application, JobType jobType, State target) {
return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
.filter(run -> targetsMatch(target, run));
}
private boolean canTrigger(Job job) {
Application application = applications().require(job.applicationId());
if (isRunning(application, job.jobType) || jobStateIsAmong(application, job.jobType, disabled))
return false;
if (successOn(application, job.jobType, job.target).filter(run -> sourcesMatchIfPresent(job.target, run)).isPresent())
return false;
if ( ! job.jobType.isProduction())
return true;
if ( ! job.concurrentlyWith.containsAll(runningProductionJobsFor(application)))
return false;
return true;
}
private List<JobType> runningProductionJobsFor(Application application) {
return application.deploymentJobs().jobStatus().keySet().parallelStream()
.filter(job -> job.isProduction())
.filter(job -> isRunning(application, job))
.collect(toList());
}
private ApplicationController applications() {
return controller.applications();
}
private boolean acceptNewApplicationVersion(Application application) {
if (application.change().application().isPresent()) return true;
if (application.deploymentJobs().hasFailures()) return true;
return ! application.changeAt(clock.instant()).platform().isPresent();
}
private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
}
private State targetFor(Application application, Change change, Optional<Deployment> deployment) {
return new State(targetPlatform(application, change, deployment),
targetApplication(application, change, deployment),
deployment.map(Deployment::version),
deployment.map(Deployment::applicationVersion));
}
private static class Job extends BuildJob {
private final Change change;
private final JobType jobType;
private final String reason;
private final Instant availableSince;
private final Collection<JobType> concurrentlyWith;
private final boolean isRetry;
private final boolean isApplicationUpgrade;
private final State target;
private Job(Application application, State target, Change change, JobType jobType, String reason, Instant availableSince, Collection<JobType> concurrentlyWith, boolean isRetry) {
super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
this.change = change;
this.jobType = jobType;
this.availableSince = availableSince;
this.concurrentlyWith = concurrentlyWith;
this.reason = reason;
this.isRetry = isRetry;
this.isApplicationUpgrade = change.application().isPresent();
this.target = target;
}
JobType jobType() { return jobType; }
Instant availableSince() { return availableSince; }
boolean isRetry() { return isRetry; }
boolean applicationUpgrade() { return isApplicationUpgrade; }
}
public static class State {
private final Version targetPlatform;
private final ApplicationVersion targetApplication;
private final Optional<Version> sourcePlatform;
private final Optional<ApplicationVersion> sourceApplication;
public State(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
this.targetPlatform = targetPlatform;
this.targetApplication = targetApplication;
this.sourcePlatform = sourcePlatform;
this.sourceApplication = sourceApplication;
}
public Version targetPlatform() { return targetPlatform; }
public ApplicationVersion targetApplication() { return targetApplication; }
public Optional<Version> sourcePlatform() { return sourcePlatform; }
public Optional<ApplicationVersion> sourceApplication() { return sourceApplication; }
@Override
public String toString() {
return String.format("platform %s%s, application %s%s",
targetPlatform,
sourcePlatform.filter(version -> ! version.equals(targetPlatform))
.map(v -> " (from " + v + ")").orElse(""),
targetApplication.id(),
sourceApplication.filter(version -> ! version.equals(targetApplication))
.map(v -> " (from " + v.id() + ")").orElse(""));
}
}
} |
Rename the variable as well, perhaps? | public static VersionStatus compute(Controller controller) {
ListMap<Version, HostName> configServerVersions = findSystemApplicationVersions(controller);
ListMap<Version, HostName> controllerVersions = findControllerVersions(controller);
Set<Version> infrastructureVersions = new HashSet<>();
infrastructureVersions.addAll(controllerVersions.keySet());
infrastructureVersions.addAll(configServerVersions.keySet());
Version controllerVersion = controllerVersions.keySet().stream().sorted().findFirst().get();
Version systemVersion = infrastructureVersions.stream().sorted().findFirst().get();
Collection<DeploymentStatistics> deploymentStatistics = computeDeploymentStatistics(infrastructureVersions,
controller.applications().asList());
List<VespaVersion> versions = new ArrayList<>();
for (DeploymentStatistics statistics : deploymentStatistics) {
if (statistics.version().isEmpty()) continue;
try {
VespaVersion vespaVersion = createVersion(statistics,
statistics.version().equals(controllerVersion),
statistics.version().equals(systemVersion),
configServerVersions.getList(statistics.version()),
controller);
versions.add(vespaVersion);
} catch (IllegalArgumentException e) {
log.log(Level.WARNING, "Unable to create VespaVersion for version " +
statistics.version().toFullString(), e);
}
}
Collections.sort(versions);
return new VersionStatus(versions);
} | ListMap<Version, HostName> configServerVersions = findSystemApplicationVersions(controller); | public static VersionStatus compute(Controller controller) {
ListMap<Version, HostName> systemApplicationVersions = findSystemApplicationVersions(controller);
ListMap<Version, HostName> controllerVersions = findControllerVersions(controller);
Set<Version> infrastructureVersions = new HashSet<>();
infrastructureVersions.addAll(controllerVersions.keySet());
infrastructureVersions.addAll(systemApplicationVersions.keySet());
Version controllerVersion = controllerVersions.keySet().stream().sorted().findFirst().get();
Version systemVersion = infrastructureVersions.stream().sorted().findFirst().get();
Collection<DeploymentStatistics> deploymentStatistics = computeDeploymentStatistics(infrastructureVersions,
controller.applications().asList());
List<VespaVersion> versions = new ArrayList<>();
for (DeploymentStatistics statistics : deploymentStatistics) {
if (statistics.version().isEmpty()) continue;
try {
VespaVersion vespaVersion = createVersion(statistics,
statistics.version().equals(controllerVersion),
statistics.version().equals(systemVersion),
systemApplicationVersions.getList(statistics.version()),
controller);
versions.add(vespaVersion);
} catch (IllegalArgumentException e) {
log.log(Level.WARNING, "Unable to create VespaVersion for version " +
statistics.version().toFullString(), e);
}
}
Collections.sort(versions);
return new VersionStatus(versions);
} | class VersionStatus {
private static final Logger log = Logger.getLogger(VersionStatus.class.getName());
private static final String VESPA_REPO = "vespa-yahoo";
private static final String VESPA_REPO_OWNER = "vespa";
private final ImmutableList<VespaVersion> versions;
/** Create a version status. DO NOT USE: Public for testing and serialization only */
public VersionStatus(List<VespaVersion> versions) {
this.versions = ImmutableList.copyOf(versions);
}
/** Returns the current version of controllers in this system */
public Optional<VespaVersion> controllerVersion() {
return versions().stream().filter(VespaVersion::isControllerVersion).findFirst();
}
/**
* Returns the current Vespa version of the system controlled by this,
* or empty if we have not currently determined what the system version is in this status.
*/
public Optional<VespaVersion> systemVersion() {
return versions().stream().filter(VespaVersion::isSystemVersion).findFirst();
}
/**
* Lists all currently active Vespa versions, with deployment statistics,
* sorted from lowest to highest version number.
* The returned list is immutable.
* Calling this is free, but the returned status is slightly out of date.
*/
public List<VespaVersion> versions() { return versions; }
/** Returns the given version, or null if it is not present */
public VespaVersion version(Version version) {
return versions.stream().filter(v -> v.versionNumber().equals(version)).findFirst().orElse(null);
}
/** Create the empty version status */
public static VersionStatus empty() { return new VersionStatus(ImmutableList.of()); }
/** Create a full, updated version status. This is expensive and should be done infrequently */
private static ListMap<Version, HostName> findSystemApplicationVersions(Controller controller) {
List<ZoneId> zones = controller.zoneRegistry().zones()
.controllerManaged()
.not().among(ZoneId.from("prod.cd-us-east-1a"),
ZoneId.from("prod.cd-aws-us-east-1a"),
ZoneId.from("prod.aws-us-east-1a"),
ZoneId.from("dev.aws-us-east-2a"))
.ids();
ListMap<Version, HostName> versions = new ListMap<>();
for (SystemApplication application : SystemApplication.all()) {
for (ZoneId zone : zones) {
for (Node node : controller.configServer().nodeRepository().list(zone, application.id())) {
versions.put(node.currentVersion(), node.hostname());
}
}
}
return versions;
}
private static ListMap<Version, HostName> findControllerVersions(Controller controller) {
ListMap<Version, HostName> versions = new ListMap<>();
if (controller.curator().cluster().isEmpty()) {
versions.put(Vtag.currentVersion, controller.hostname());
} else {
for (HostName hostname : controller.curator().cluster()) {
versions.put(controller.curator().readControllerVersion(hostname), hostname);
}
}
return versions;
}
private static Collection<DeploymentStatistics> computeDeploymentStatistics(Set<Version> infrastructureVersions,
List<Application> applications) {
Map<Version, DeploymentStatistics> versionMap = new HashMap<>();
for (Version infrastructureVersion : infrastructureVersions) {
versionMap.put(infrastructureVersion, DeploymentStatistics.empty(infrastructureVersion));
}
ApplicationList applicationList = ApplicationList.from(applications)
.notPullRequest()
.hasProductionDeployment();
for (Application application : applicationList.asList()) {
for (Deployment deployment : application.productionDeployments().values()) {
versionMap.computeIfAbsent(deployment.version(), DeploymentStatistics::empty);
}
JobList.from(application)
.failing()
.not().failingApplicationChange()
.not().failingBecause(outOfCapacity)
.mapToList(job -> job.lastCompleted().get().platform())
.forEach(version -> versionMap.put(version, versionMap.getOrDefault(version, DeploymentStatistics.empty(version)).withFailing(application.id())));
JobList.from(application)
.lastSuccess().present()
.production()
.mapToList(job -> job.lastSuccess().get().platform())
.forEach(version -> versionMap.put(version, versionMap.getOrDefault(version, DeploymentStatistics.empty(version)).withProduction(application.id())));
JobList.from(application)
.upgrading()
.mapToList(job -> job.lastTriggered().get().platform())
.forEach(version -> versionMap.put(version, versionMap.getOrDefault(version, DeploymentStatistics.empty(version)).withDeploying(application.id())));
}
return versionMap.values();
}
private static VespaVersion createVersion(DeploymentStatistics statistics,
boolean isControllerVersion,
boolean isSystemVersion,
Collection<HostName> configServerHostnames,
Controller controller) {
GitSha gitSha = controller.gitHub().getCommit(VESPA_REPO_OWNER, VESPA_REPO, statistics.version().toFullString());
Instant committedAt = Instant.ofEpochMilli(gitSha.commit.author.date.getTime());
VespaVersion.Confidence confidence = controller.curator().readConfidenceOverrides().get(statistics.version());
if (confidence == null) {
if (isSystemVersion || isControllerVersion) {
confidence = VespaVersion.confidenceFrom(statistics, controller);
} else {
confidence = confidenceFor(statistics.version(), controller)
.orElse(VespaVersion.confidenceFrom(statistics, controller));
}
}
return new VespaVersion(statistics,
gitSha.sha, committedAt,
isControllerVersion,
isSystemVersion,
configServerHostnames,
confidence
);
}
/** Returns the current confidence for the given version */
private static Optional<VespaVersion.Confidence> confidenceFor(Version version, Controller controller) {
return controller.versionStatus().versions().stream()
.filter(v -> version.equals(v.versionNumber()))
.map(VespaVersion::confidence)
.findFirst();
}
} | class VersionStatus {
private static final Logger log = Logger.getLogger(VersionStatus.class.getName());
private static final String VESPA_REPO = "vespa-yahoo";
private static final String VESPA_REPO_OWNER = "vespa";
private final ImmutableList<VespaVersion> versions;
/** Create a version status. DO NOT USE: Public for testing and serialization only */
public VersionStatus(List<VespaVersion> versions) {
this.versions = ImmutableList.copyOf(versions);
}
/** Returns the current version of controllers in this system */
public Optional<VespaVersion> controllerVersion() {
return versions().stream().filter(VespaVersion::isControllerVersion).findFirst();
}
/**
* Returns the current Vespa version of the system controlled by this,
* or empty if we have not currently determined what the system version is in this status.
*/
public Optional<VespaVersion> systemVersion() {
return versions().stream().filter(VespaVersion::isSystemVersion).findFirst();
}
/**
* Lists all currently active Vespa versions, with deployment statistics,
* sorted from lowest to highest version number.
* The returned list is immutable.
* Calling this is free, but the returned status is slightly out of date.
*/
public List<VespaVersion> versions() { return versions; }
/** Returns the given version, or null if it is not present */
public VespaVersion version(Version version) {
return versions.stream().filter(v -> v.versionNumber().equals(version)).findFirst().orElse(null);
}
/** Create the empty version status */
public static VersionStatus empty() { return new VersionStatus(ImmutableList.of()); }
/** Create a full, updated version status. This is expensive and should be done infrequently */
private static ListMap<Version, HostName> findSystemApplicationVersions(Controller controller) {
List<ZoneId> zones = controller.zoneRegistry().zones()
.controllerUpgraded()
.ids();
ListMap<Version, HostName> versions = new ListMap<>();
for (ZoneId zone : zones) {
for (SystemApplication application : SystemApplication.all()) {
for (Node node : controller.configServer().nodeRepository().list(zone, application.id())) {
versions.put(node.currentVersion(), node.hostname());
}
}
}
return versions;
}
private static ListMap<Version, HostName> findControllerVersions(Controller controller) {
ListMap<Version, HostName> versions = new ListMap<>();
if (controller.curator().cluster().isEmpty()) {
versions.put(Vtag.currentVersion, controller.hostname());
} else {
for (HostName hostname : controller.curator().cluster()) {
versions.put(controller.curator().readControllerVersion(hostname), hostname);
}
}
return versions;
}
private static Collection<DeploymentStatistics> computeDeploymentStatistics(Set<Version> infrastructureVersions,
List<Application> applications) {
Map<Version, DeploymentStatistics> versionMap = new HashMap<>();
for (Version infrastructureVersion : infrastructureVersions) {
versionMap.put(infrastructureVersion, DeploymentStatistics.empty(infrastructureVersion));
}
ApplicationList applicationList = ApplicationList.from(applications)
.notPullRequest()
.hasProductionDeployment();
for (Application application : applicationList.asList()) {
for (Deployment deployment : application.productionDeployments().values()) {
versionMap.computeIfAbsent(deployment.version(), DeploymentStatistics::empty);
}
JobList.from(application)
.failing()
.not().failingApplicationChange()
.not().failingBecause(outOfCapacity)
.mapToList(job -> job.lastCompleted().get().platform())
.forEach(version -> versionMap.put(version, versionMap.getOrDefault(version, DeploymentStatistics.empty(version)).withFailing(application.id())));
JobList.from(application)
.lastSuccess().present()
.production()
.mapToList(job -> job.lastSuccess().get().platform())
.forEach(version -> versionMap.put(version, versionMap.getOrDefault(version, DeploymentStatistics.empty(version)).withProduction(application.id())));
JobList.from(application)
.upgrading()
.mapToList(job -> job.lastTriggered().get().platform())
.forEach(version -> versionMap.put(version, versionMap.getOrDefault(version, DeploymentStatistics.empty(version)).withDeploying(application.id())));
}
return versionMap.values();
}
private static VespaVersion createVersion(DeploymentStatistics statistics,
boolean isControllerVersion,
boolean isSystemVersion,
Collection<HostName> configServerHostnames,
Controller controller) {
GitSha gitSha = controller.gitHub().getCommit(VESPA_REPO_OWNER, VESPA_REPO, statistics.version().toFullString());
Instant committedAt = Instant.ofEpochMilli(gitSha.commit.author.date.getTime());
VespaVersion.Confidence confidence = controller.curator().readConfidenceOverrides().get(statistics.version());
if (confidence == null) {
if (isSystemVersion || isControllerVersion) {
confidence = VespaVersion.confidenceFrom(statistics, controller);
} else {
confidence = confidenceFor(statistics.version(), controller)
.orElse(VespaVersion.confidenceFrom(statistics, controller));
}
}
return new VespaVersion(statistics,
gitSha.sha, committedAt,
isControllerVersion,
isSystemVersion,
configServerHostnames,
confidence
);
}
/** Returns the current confidence for the given version */
private static Optional<VespaVersion.Confidence> confidenceFor(Version version, Controller controller) {
return controller.versionStatus().versions().stream()
.filter(v -> version.equals(v.versionNumber()))
.map(VespaVersion::confidence)
.findFirst();
}
} |
Tahnks. | private static Optional<ApplicationVersion> requireNotUnknown(Optional<ApplicationVersion> latestVersion) {
Objects.requireNonNull(latestVersion, "latestVersion cannot be null");
latestVersion.ifPresent(version -> {
if (version.isUnknown())
throw new IllegalArgumentException("latstVersion cannot be unknown");
});
return latestVersion;
} | throw new IllegalArgumentException("latstVersion cannot be unknown"); | private static Optional<ApplicationVersion> requireNotUnknown(Optional<ApplicationVersion> latestVersion) {
Objects.requireNonNull(latestVersion, "latestVersion cannot be null");
latestVersion.ifPresent(version -> {
if (version.isUnknown())
throw new IllegalArgumentException("latstVersion cannot be unknown");
});
return latestVersion;
} | class Application {
private final TenantAndApplicationId id;
private final Instant createdAt;
private final DeploymentSpec deploymentSpec;
private final ValidationOverrides validationOverrides;
private final Optional<ApplicationVersion> latestVersion;
private final OptionalLong projectId;
private final boolean internal;
private final Change change;
private final Change outstandingChange;
private final Optional<IssueId> deploymentIssueId;
private final Optional<IssueId> ownershipIssueId;
private final Optional<User> owner;
private final OptionalInt majorVersion;
private final ApplicationMetrics metrics;
private final Set<PublicKey> deployKeys;
private final Map<InstanceName, Instance> instances;
/** Creates an empty application. */
public Application(TenantAndApplicationId id, Instant now) {
this(id, now, DeploymentSpec.empty, ValidationOverrides.empty, Change.empty(), Change.empty(),
Optional.empty(), Optional.empty(), Optional.empty(), OptionalInt.empty(),
new ApplicationMetrics(0, 0), Set.of(), OptionalLong.empty(), false, Optional.empty(), List.of());
}
public Application(TenantAndApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides,
Change change, Change outstandingChange, Optional<IssueId> deploymentIssueId, Optional<IssueId> ownershipIssueId, Optional<User> owner,
OptionalInt majorVersion, ApplicationMetrics metrics, Set<PublicKey> deployKeys, OptionalLong projectId,
boolean internal, Optional<ApplicationVersion> latestVersion, Collection<Instance> instances) {
this.id = Objects.requireNonNull(id, "id cannot be null");
this.createdAt = Objects.requireNonNull(createdAt, "instant of creation cannot be null");
this.deploymentSpec = Objects.requireNonNull(deploymentSpec, "deploymentSpec cannot be null");
this.validationOverrides = Objects.requireNonNull(validationOverrides, "validationOverrides cannot be null");
this.change = Objects.requireNonNull(change, "change cannot be null");
this.outstandingChange = Objects.requireNonNull(outstandingChange, "outstandingChange cannot be null");
this.deploymentIssueId = Objects.requireNonNull(deploymentIssueId, "deploymentIssueId cannot be null");
this.ownershipIssueId = Objects.requireNonNull(ownershipIssueId, "ownershipIssueId cannot be null");
this.owner = Objects.requireNonNull(owner, "owner cannot be null");
this.majorVersion = Objects.requireNonNull(majorVersion, "majorVersion cannot be null");
this.metrics = Objects.requireNonNull(metrics, "metrics cannot be null");
this.deployKeys = Objects.requireNonNull(deployKeys, "deployKeys cannot be null");
this.projectId = Objects.requireNonNull(projectId, "projectId cannot be null");
this.internal = internal;
this.latestVersion = requireNotUnknown(latestVersion);
this.instances = ImmutableSortedMap.copyOf(instances.stream().collect(Collectors.toMap(Instance::name, Function.identity())));
}
public TenantAndApplicationId id() { return id; }
public Instant createdAt() { return createdAt; }
/**
* Returns the last deployed deployment spec of this application,
* or the empty deployment spec if it has never been deployed
*/
public DeploymentSpec deploymentSpec() { return deploymentSpec; }
/** Returns the project id of this application, if it has any. */
public OptionalLong projectId() { return projectId; }
/** Returns the last submitted version of this application. */
public Optional<ApplicationVersion> latestVersion() { return latestVersion; }
/** Returns whether this application is run on the internal deployment pipeline. */
public boolean internal() { return internal; }
/**
* Returns the last deployed validation overrides of this application,
* or the empty validation overrides if it has never been deployed
* (or was deployed with an empty/missing validation overrides)
*/
public ValidationOverrides validationOverrides() { return validationOverrides; }
/** Returns the instances of this application */
public Map<InstanceName, Instance> instances() { return instances; }
/** Returns the instance with the given name, if it exists. */
public Optional<Instance> get(InstanceName instance) { return Optional.ofNullable(instances.get(instance)); }
/** Returns the instance with the given name, or throws. */
public Instance require(InstanceName instance) {
return get(instance).orElseThrow(() -> new IllegalArgumentException("Unknown instance '" + instance + "'"));
}
/**
* Returns base change for this application, i.e., the change that is deployed outside block windows.
* This is empty when no change is currently under deployment.
*/
public Change change() { return change; }
/**
* Returns whether this has an outstanding change (in the source repository), which
* has currently not started deploying (because a deployment is (or was) already in progress
*/
public Change outstandingChange() { return outstandingChange; }
/** Returns ID of any open deployment issue filed for this */
public Optional<IssueId> deploymentIssueId() {
return deploymentIssueId;
}
/** Returns ID of the last ownership issue filed for this */
public Optional<IssueId> ownershipIssueId() {
return ownershipIssueId;
}
public Optional<User> owner() {
return owner;
}
/**
* Overrides the system major version for this application. This override takes effect if the deployment
* spec does not specify a major version.
*/
public OptionalInt majorVersion() { return majorVersion; }
/** Returns metrics for this */
public ApplicationMetrics metrics() {
return metrics;
}
/** Returns activity for this */
public ApplicationActivity activity() {
return ApplicationActivity.from(instances.values().stream()
.flatMap(instance -> instance.deployments().values().stream())
.collect(Collectors.toUnmodifiableList()));
}
public Map<InstanceName, List<Deployment>> productionDeployments() {
return instances.values().stream()
.collect(Collectors.toUnmodifiableMap(Instance::name,
instance -> List.copyOf(instance.productionDeployments().values())));
}
/**
* Returns the oldest platform version this has deployed in a permanent zone (not test or staging).
*
* This is unfortunately quite similar to {@link ApplicationController
* but this checks only what the controller has deployed to the production zones, while that checks the node repository
* to see what's actually installed on each node. Thus, this is the right choice for, e.g., target Vespa versions for
* new deployments, while that is the right choice for version to compile against.
*/
public Optional<Version> oldestDeployedPlatform() {
return productionDeployments().values().stream().flatMap(List::stream)
.map(Deployment::version)
.min(Comparator.naturalOrder());
}
/**
* Returns the oldest application version this has deployed in a permanent zone (not test or staging).
*/
public Optional<ApplicationVersion> oldestDeployedApplication() {
return productionDeployments().values().stream().flatMap(List::stream)
.map(Deployment::applicationVersion)
.min(Comparator.naturalOrder());
}
/** Returns the set of deploy keys for this application. */
public Set<PublicKey> deployKeys() { return deployKeys; }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (! (o instanceof Application)) return false;
Application that = (Application) o;
return id.equals(that.id);
}
@Override
public int hashCode() {
return id.hashCode();
}
@Override
public String toString() {
return "application '" + id + "'";
}
} | class Application {
private final TenantAndApplicationId id;
private final Instant createdAt;
private final DeploymentSpec deploymentSpec;
private final ValidationOverrides validationOverrides;
private final Optional<ApplicationVersion> latestVersion;
private final OptionalLong projectId;
private final boolean internal;
private final Change change;
private final Change outstandingChange;
private final Optional<IssueId> deploymentIssueId;
private final Optional<IssueId> ownershipIssueId;
private final Optional<User> owner;
private final OptionalInt majorVersion;
private final ApplicationMetrics metrics;
private final Set<PublicKey> deployKeys;
private final Map<InstanceName, Instance> instances;
/** Creates an empty application. */
public Application(TenantAndApplicationId id, Instant now) {
this(id, now, DeploymentSpec.empty, ValidationOverrides.empty, Change.empty(), Change.empty(),
Optional.empty(), Optional.empty(), Optional.empty(), OptionalInt.empty(),
new ApplicationMetrics(0, 0), Set.of(), OptionalLong.empty(), false, Optional.empty(), List.of());
}
public Application(TenantAndApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides,
Change change, Change outstandingChange, Optional<IssueId> deploymentIssueId, Optional<IssueId> ownershipIssueId, Optional<User> owner,
OptionalInt majorVersion, ApplicationMetrics metrics, Set<PublicKey> deployKeys, OptionalLong projectId,
boolean internal, Optional<ApplicationVersion> latestVersion, Collection<Instance> instances) {
this.id = Objects.requireNonNull(id, "id cannot be null");
this.createdAt = Objects.requireNonNull(createdAt, "instant of creation cannot be null");
this.deploymentSpec = Objects.requireNonNull(deploymentSpec, "deploymentSpec cannot be null");
this.validationOverrides = Objects.requireNonNull(validationOverrides, "validationOverrides cannot be null");
this.change = Objects.requireNonNull(change, "change cannot be null");
this.outstandingChange = Objects.requireNonNull(outstandingChange, "outstandingChange cannot be null");
this.deploymentIssueId = Objects.requireNonNull(deploymentIssueId, "deploymentIssueId cannot be null");
this.ownershipIssueId = Objects.requireNonNull(ownershipIssueId, "ownershipIssueId cannot be null");
this.owner = Objects.requireNonNull(owner, "owner cannot be null");
this.majorVersion = Objects.requireNonNull(majorVersion, "majorVersion cannot be null");
this.metrics = Objects.requireNonNull(metrics, "metrics cannot be null");
this.deployKeys = Objects.requireNonNull(deployKeys, "deployKeys cannot be null");
this.projectId = Objects.requireNonNull(projectId, "projectId cannot be null");
this.internal = internal;
this.latestVersion = requireNotUnknown(latestVersion);
this.instances = ImmutableSortedMap.copyOf(instances.stream().collect(Collectors.toMap(Instance::name, Function.identity())));
}
public TenantAndApplicationId id() { return id; }
public Instant createdAt() { return createdAt; }
/**
* Returns the last deployed deployment spec of this application,
* or the empty deployment spec if it has never been deployed
*/
public DeploymentSpec deploymentSpec() { return deploymentSpec; }
/** Returns the project id of this application, if it has any. */
public OptionalLong projectId() { return projectId; }
/** Returns the last submitted version of this application. */
public Optional<ApplicationVersion> latestVersion() { return latestVersion; }
/** Returns whether this application is run on the internal deployment pipeline. */
public boolean internal() { return internal; }
/**
* Returns the last deployed validation overrides of this application,
* or the empty validation overrides if it has never been deployed
* (or was deployed with an empty/missing validation overrides)
*/
public ValidationOverrides validationOverrides() { return validationOverrides; }
/** Returns the instances of this application */
public Map<InstanceName, Instance> instances() { return instances; }
/** Returns the instance with the given name, if it exists. */
public Optional<Instance> get(InstanceName instance) { return Optional.ofNullable(instances.get(instance)); }
/** Returns the instance with the given name, or throws. */
public Instance require(InstanceName instance) {
return get(instance).orElseThrow(() -> new IllegalArgumentException("Unknown instance '" + instance + "'"));
}
/**
* Returns base change for this application, i.e., the change that is deployed outside block windows.
* This is empty when no change is currently under deployment.
*/
public Change change() { return change; }
/**
* Returns whether this has an outstanding change (in the source repository), which
* has currently not started deploying (because a deployment is (or was) already in progress
*/
public Change outstandingChange() { return outstandingChange; }
/** Returns ID of any open deployment issue filed for this */
public Optional<IssueId> deploymentIssueId() {
return deploymentIssueId;
}
/** Returns ID of the last ownership issue filed for this */
public Optional<IssueId> ownershipIssueId() {
return ownershipIssueId;
}
public Optional<User> owner() {
return owner;
}
/**
* Overrides the system major version for this application. This override takes effect if the deployment
* spec does not specify a major version.
*/
public OptionalInt majorVersion() { return majorVersion; }
/** Returns metrics for this */
public ApplicationMetrics metrics() {
return metrics;
}
/** Returns activity for this */
public ApplicationActivity activity() {
return ApplicationActivity.from(instances.values().stream()
.flatMap(instance -> instance.deployments().values().stream())
.collect(Collectors.toUnmodifiableList()));
}
public Map<InstanceName, List<Deployment>> productionDeployments() {
return instances.values().stream()
.collect(Collectors.toUnmodifiableMap(Instance::name,
instance -> List.copyOf(instance.productionDeployments().values())));
}
/**
* Returns the oldest platform version this has deployed in a permanent zone (not test or staging).
*
* This is unfortunately quite similar to {@link ApplicationController
* but this checks only what the controller has deployed to the production zones, while that checks the node repository
* to see what's actually installed on each node. Thus, this is the right choice for, e.g., target Vespa versions for
* new deployments, while that is the right choice for version to compile against.
*/
public Optional<Version> oldestDeployedPlatform() {
return productionDeployments().values().stream().flatMap(List::stream)
.map(Deployment::version)
.min(Comparator.naturalOrder());
}
/**
* Returns the oldest application version this has deployed in a permanent zone (not test or staging).
*/
public Optional<ApplicationVersion> oldestDeployedApplication() {
return productionDeployments().values().stream().flatMap(List::stream)
.map(Deployment::applicationVersion)
.min(Comparator.naturalOrder());
}
/** Returns the set of deploy keys for this application. */
public Set<PublicKey> deployKeys() { return deployKeys; }
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (! (o instanceof Application)) return false;
Application that = (Application) o;
return id.equals(that.id);
}
@Override
public int hashCode() {
return id.hashCode();
}
@Override
public String toString() {
return "application '" + id + "'";
}
} |
Yes. | public static VersionStatus compute(Controller controller) {
ListMap<Version, HostName> configServerVersions = findSystemApplicationVersions(controller);
ListMap<Version, HostName> controllerVersions = findControllerVersions(controller);
Set<Version> infrastructureVersions = new HashSet<>();
infrastructureVersions.addAll(controllerVersions.keySet());
infrastructureVersions.addAll(configServerVersions.keySet());
Version controllerVersion = controllerVersions.keySet().stream().sorted().findFirst().get();
Version systemVersion = infrastructureVersions.stream().sorted().findFirst().get();
Collection<DeploymentStatistics> deploymentStatistics = computeDeploymentStatistics(infrastructureVersions,
controller.applications().asList());
List<VespaVersion> versions = new ArrayList<>();
for (DeploymentStatistics statistics : deploymentStatistics) {
if (statistics.version().isEmpty()) continue;
try {
VespaVersion vespaVersion = createVersion(statistics,
statistics.version().equals(controllerVersion),
statistics.version().equals(systemVersion),
configServerVersions.getList(statistics.version()),
controller);
versions.add(vespaVersion);
} catch (IllegalArgumentException e) {
log.log(Level.WARNING, "Unable to create VespaVersion for version " +
statistics.version().toFullString(), e);
}
}
Collections.sort(versions);
return new VersionStatus(versions);
} | ListMap<Version, HostName> configServerVersions = findSystemApplicationVersions(controller); | public static VersionStatus compute(Controller controller) {
ListMap<Version, HostName> systemApplicationVersions = findSystemApplicationVersions(controller);
ListMap<Version, HostName> controllerVersions = findControllerVersions(controller);
Set<Version> infrastructureVersions = new HashSet<>();
infrastructureVersions.addAll(controllerVersions.keySet());
infrastructureVersions.addAll(systemApplicationVersions.keySet());
Version controllerVersion = controllerVersions.keySet().stream().sorted().findFirst().get();
Version systemVersion = infrastructureVersions.stream().sorted().findFirst().get();
Collection<DeploymentStatistics> deploymentStatistics = computeDeploymentStatistics(infrastructureVersions,
controller.applications().asList());
List<VespaVersion> versions = new ArrayList<>();
for (DeploymentStatistics statistics : deploymentStatistics) {
if (statistics.version().isEmpty()) continue;
try {
VespaVersion vespaVersion = createVersion(statistics,
statistics.version().equals(controllerVersion),
statistics.version().equals(systemVersion),
systemApplicationVersions.getList(statistics.version()),
controller);
versions.add(vespaVersion);
} catch (IllegalArgumentException e) {
log.log(Level.WARNING, "Unable to create VespaVersion for version " +
statistics.version().toFullString(), e);
}
}
Collections.sort(versions);
return new VersionStatus(versions);
} | class VersionStatus {
private static final Logger log = Logger.getLogger(VersionStatus.class.getName());
private static final String VESPA_REPO = "vespa-yahoo";
private static final String VESPA_REPO_OWNER = "vespa";
private final ImmutableList<VespaVersion> versions;
/** Create a version status. DO NOT USE: Public for testing and serialization only */
public VersionStatus(List<VespaVersion> versions) {
this.versions = ImmutableList.copyOf(versions);
}
/** Returns the current version of controllers in this system */
public Optional<VespaVersion> controllerVersion() {
return versions().stream().filter(VespaVersion::isControllerVersion).findFirst();
}
/**
* Returns the current Vespa version of the system controlled by this,
* or empty if we have not currently determined what the system version is in this status.
*/
public Optional<VespaVersion> systemVersion() {
return versions().stream().filter(VespaVersion::isSystemVersion).findFirst();
}
/**
* Lists all currently active Vespa versions, with deployment statistics,
* sorted from lowest to highest version number.
* The returned list is immutable.
* Calling this is free, but the returned status is slightly out of date.
*/
public List<VespaVersion> versions() { return versions; }
/** Returns the VespaVersion carrying the given version number, or null if it is not known. */
public VespaVersion version(Version version) {
for (VespaVersion candidate : versions) {
if (candidate.versionNumber().equals(version)) {
return candidate;
}
}
return null;
}
/** Create the empty version status */
public static VersionStatus empty() { return new VersionStatus(ImmutableList.of()); }
/** Create a full, updated version status. This is expensive and should be done infrequently */
/**
 * Finds the versions of all system applications across controller-managed zones.
 *
 * NOTE(review): the excluded zones are hard-coded by id — presumably zones where node
 * listing is not supported; confirm, and consider moving the exclusions to configuration.
 *
 * @param controller the controller used to reach each zone's node repository
 * @return a list-multimap from version to the hostnames currently running that version
 */
private static ListMap<Version, HostName> findSystemApplicationVersions(Controller controller) {
List<ZoneId> zones = controller.zoneRegistry().zones()
.controllerManaged()
.not().among(ZoneId.from("prod.cd-us-east-1a"),
ZoneId.from("prod.cd-aws-us-east-1a"),
ZoneId.from("prod.aws-us-east-1a"),
ZoneId.from("dev.aws-us-east-2a"))
.ids();
ListMap<Version, HostName> versions = new ListMap<>();
for (SystemApplication application : SystemApplication.all()) {
for (ZoneId zone : zones) {
// Record the current (active) version of every node of this application in this zone
for (Node node : controller.configServer().nodeRepository().list(zone, application.id())) {
versions.put(node.currentVersion(), node.hostname());
}
}
}
return versions;
}
/** Returns the version of each controller in this system, keyed by version. */
private static ListMap<Version, HostName> findControllerVersions(Controller controller) {
ListMap<Version, HostName> controllerVersions = new ListMap<>();
if ( ! controller.curator().cluster().isEmpty()) {
for (HostName hostname : controller.curator().cluster()) {
controllerVersions.put(controller.curator().readControllerVersion(hostname), hostname);
}
} else {
// No registered cluster members: fall back to this controller's own build version
controllerVersions.put(Vtag.currentVersion, controller.hostname());
}
return controllerVersions;
}
/**
 * Computes per-version deployment statistics across all applications.
 *
 * The map is seeded with all infrastructure versions, then each (non-PR, production)
 * application contributes: versions it is failing on (excluding application-change and
 * out-of-capacity failures), versions it has successfully deployed to production, and
 * versions it is currently upgrading to.
 *
 * @param infrastructureVersions versions currently present on infrastructure
 * @param applications all applications to gather statistics from
 * @return one DeploymentStatistics per distinct version observed
 */
private static Collection<DeploymentStatistics> computeDeploymentStatistics(Set<Version> infrastructureVersions,
List<Application> applications) {
Map<Version, DeploymentStatistics> versionMap = new HashMap<>();
for (Version infrastructureVersion : infrastructureVersions) {
versionMap.put(infrastructureVersion, DeploymentStatistics.empty(infrastructureVersion));
}
ApplicationList applicationList = ApplicationList.from(applications)
.notPullRequest()
.hasProductionDeployment();
for (Application application : applicationList.asList()) {
// Ensure versions only seen in production deployments are also represented
for (Deployment deployment : application.productionDeployments().values()) {
versionMap.computeIfAbsent(deployment.version(), DeploymentStatistics::empty);
}
// Failing on this version; application-change and out-of-capacity failures are excluded
JobList.from(application)
.failing()
.not().failingApplicationChange()
.not().failingBecause(outOfCapacity)
.mapToList(job -> job.lastCompleted().get().platform())
.forEach(version -> versionMap.compute(version, (v, statistics) ->
orEmpty(v, statistics).withFailing(application.id())));
// Successfully deployed to production on this version
JobList.from(application)
.lastSuccess().present()
.production()
.mapToList(job -> job.lastSuccess().get().platform())
.forEach(version -> versionMap.compute(version, (v, statistics) ->
orEmpty(v, statistics).withProduction(application.id())));
// Currently upgrading to this version
JobList.from(application)
.upgrading()
.mapToList(job -> job.lastTriggered().get().platform())
.forEach(version -> versionMap.compute(version, (v, statistics) ->
orEmpty(v, statistics).withDeploying(application.id())));
}
return versionMap.values();
}

/** Returns the given statistics, or empty statistics for the version if null. */
private static DeploymentStatistics orEmpty(Version version, DeploymentStatistics statistics) {
return statistics == null ? DeploymentStatistics.empty(version) : statistics;
}
/**
 * Builds a VespaVersion from deployment statistics, enriched with the git commit of the
 * release and a confidence level.
 *
 * Confidence resolution: an operator override stored in curator wins; otherwise the
 * system/controller versions are always recomputed, while other versions reuse the
 * confidence from the previously computed status when present.
 *
 * @throws IllegalArgumentException presumably when the GitHub lookup finds no matching
 *         release — the caller catches and skips such versions; confirm
 */
private static VespaVersion createVersion(DeploymentStatistics statistics,
boolean isControllerVersion,
boolean isSystemVersion,
Collection<HostName> configServerHostnames,
Controller controller) {
GitSha gitSha = controller.gitHub().getCommit(VESPA_REPO_OWNER, VESPA_REPO, statistics.version().toFullString());
Instant committedAt = Instant.ofEpochMilli(gitSha.commit.author.date.getTime());
// An explicit confidence override takes precedence over any computed value
VespaVersion.Confidence confidence = controller.curator().readConfidenceOverrides().get(statistics.version());
if (confidence == null) {
// System and controller versions always get freshly computed confidence
if (isSystemVersion || isControllerVersion) {
confidence = VespaVersion.confidenceFrom(statistics, controller);
} else {
// Other versions keep their previously computed confidence, when available
confidence = confidenceFor(statistics.version(), controller)
.orElse(VespaVersion.confidenceFrom(statistics, controller));
}
}
return new VespaVersion(statistics,
gitSha.sha, committedAt,
isControllerVersion,
isSystemVersion,
configServerHostnames,
confidence
);
}
/** Returns the confidence of the given version from the current status, if that version is known. */
private static Optional<VespaVersion.Confidence> confidenceFor(Version version, Controller controller) {
for (VespaVersion candidate : controller.versionStatus().versions()) {
if (version.equals(candidate.versionNumber())) {
return Optional.of(candidate.confidence());
}
}
return Optional.empty();
}
} | class VersionStatus {
private static final Logger log = Logger.getLogger(VersionStatus.class.getName());
private static final String VESPA_REPO = "vespa-yahoo";
private static final String VESPA_REPO_OWNER = "vespa";
private final ImmutableList<VespaVersion> versions;
/** Create a version status. DO NOT USE: Public for testing and serialization only */
public VersionStatus(List<VespaVersion> versions) {
this.versions = ImmutableList.copyOf(versions);
}
/** Returns the current version of controllers in this system */
public Optional<VespaVersion> controllerVersion() {
return versions().stream().filter(VespaVersion::isControllerVersion).findFirst();
}
/**
* Returns the current Vespa version of the system controlled by this,
* or empty if we have not currently determined what the system version is in this status.
*/
public Optional<VespaVersion> systemVersion() {
return versions().stream().filter(VespaVersion::isSystemVersion).findFirst();
}
/**
* Lists all currently active Vespa versions, with deployment statistics,
* sorted from lowest to highest version number.
* The returned list is immutable.
* Calling this is free, but the returned status is slightly out of date.
*/
public List<VespaVersion> versions() { return versions; }
/** Returns the given version, or null if it is not present */
public VespaVersion version(Version version) {
return versions.stream().filter(v -> v.versionNumber().equals(version)).findFirst().orElse(null);
}
/** Create the empty version status */
public static VersionStatus empty() { return new VersionStatus(ImmutableList.of()); }
/** Create a full, updated version status. This is expensive and should be done infrequently */
/**
 * Finds the versions of all system applications in every controller-upgraded zone.
 *
 * @param controller the controller used to reach each zone's node repository
 * @return a list-multimap from version to the hostnames currently running that version
 */
private static ListMap<Version, HostName> findSystemApplicationVersions(Controller controller) {
List<ZoneId> zones = controller.zoneRegistry().zones()
.controllerUpgraded()
.ids();
ListMap<Version, HostName> versions = new ListMap<>();
for (ZoneId zone : zones) {
for (SystemApplication application : SystemApplication.all()) {
// Record the current (active) version of every node of this application in this zone
for (Node node : controller.configServer().nodeRepository().list(zone, application.id())) {
versions.put(node.currentVersion(), node.hostname());
}
}
}
return versions;
}
private static ListMap<Version, HostName> findControllerVersions(Controller controller) {
ListMap<Version, HostName> versions = new ListMap<>();
if (controller.curator().cluster().isEmpty()) {
versions.put(Vtag.currentVersion, controller.hostname());
} else {
for (HostName hostname : controller.curator().cluster()) {
versions.put(controller.curator().readControllerVersion(hostname), hostname);
}
}
return versions;
}
/**
 * Computes per-version deployment statistics across all applications.
 *
 * The map is seeded with all infrastructure versions; each non-PR application with a
 * production deployment then contributes its failing, production-deployed and upgrading
 * versions.
 *
 * @param infrastructureVersions versions currently present on infrastructure
 * @param applications all applications to gather statistics from
 * @return one DeploymentStatistics per distinct version observed
 */
private static Collection<DeploymentStatistics> computeDeploymentStatistics(Set<Version> infrastructureVersions,
List<Application> applications) {
Map<Version, DeploymentStatistics> versionMap = new HashMap<>();
for (Version infrastructureVersion : infrastructureVersions) {
versionMap.put(infrastructureVersion, DeploymentStatistics.empty(infrastructureVersion));
}
ApplicationList applicationList = ApplicationList.from(applications)
.notPullRequest()
.hasProductionDeployment();
for (Application application : applicationList.asList()) {
// Ensure versions only seen in production deployments are also represented
for (Deployment deployment : application.productionDeployments().values()) {
versionMap.computeIfAbsent(deployment.version(), DeploymentStatistics::empty);
}
// Failing on this version; application-change and out-of-capacity failures are excluded
JobList.from(application)
.failing()
.not().failingApplicationChange()
.not().failingBecause(outOfCapacity)
.mapToList(job -> job.lastCompleted().get().platform())
.forEach(version -> versionMap.put(version, versionMap.getOrDefault(version, DeploymentStatistics.empty(version)).withFailing(application.id())));
// Successfully deployed to production on this version
JobList.from(application)
.lastSuccess().present()
.production()
.mapToList(job -> job.lastSuccess().get().platform())
.forEach(version -> versionMap.put(version, versionMap.getOrDefault(version, DeploymentStatistics.empty(version)).withProduction(application.id())));
// Currently upgrading to this version
JobList.from(application)
.upgrading()
.mapToList(job -> job.lastTriggered().get().platform())
.forEach(version -> versionMap.put(version, versionMap.getOrDefault(version, DeploymentStatistics.empty(version)).withDeploying(application.id())));
}
return versionMap.values();
}
private static VespaVersion createVersion(DeploymentStatistics statistics,
boolean isControllerVersion,
boolean isSystemVersion,
Collection<HostName> configServerHostnames,
Controller controller) {
GitSha gitSha = controller.gitHub().getCommit(VESPA_REPO_OWNER, VESPA_REPO, statistics.version().toFullString());
Instant committedAt = Instant.ofEpochMilli(gitSha.commit.author.date.getTime());
VespaVersion.Confidence confidence = controller.curator().readConfidenceOverrides().get(statistics.version());
if (confidence == null) {
if (isSystemVersion || isControllerVersion) {
confidence = VespaVersion.confidenceFrom(statistics, controller);
} else {
confidence = confidenceFor(statistics.version(), controller)
.orElse(VespaVersion.confidenceFrom(statistics, controller));
}
}
return new VespaVersion(statistics,
gitSha.sha, committedAt,
isControllerVersion,
isSystemVersion,
configServerHostnames,
confidence
);
}
/** Returns the current confidence for the given version */
private static Optional<VespaVersion.Confidence> confidenceFor(Version version, Controller controller) {
return controller.versionStatus().versions().stream()
.filter(v -> version.equals(v.versionNumber()))
.map(VespaVersion::confidence)
.findFirst();
}
} |
I'd prefer the below, but no hassle :) ```java (__, map) --> { if (map == null) map = new HashMap<>(); nodes.forEach(node -> v.put(node.hostname(), node)); return map; } | public void add(ZoneId zone, List<Node> nodes) {
nodeRepository.compute(zone, (k, v) -> {
if (v == null) {
return nodes.stream().collect(Collectors.toMap(Node::hostname, Function.identity()));
} else {
nodes.forEach(node -> v.put(node.hostname(), node));
return v;
}
});
} | return v; | public void add(ZoneId zone, List<Node> nodes) {
nodeRepository.compute(zone, (k, v) -> {
if (v == null) {
v = new HashMap<>();
}
for (Node node : nodes) {
v.put(node.hostname(), node);
}
return v;
});
} | class NodeRepositoryMock implements NodeRepository {
private final Map<ZoneId, Map<HostName, Node>> nodeRepository = new HashMap<>();
public void add(ZoneId zone, Node node) {
add(zone, Collections.singletonList(node));
}
public void clear() {
nodeRepository.clear();
}
/** Lists the nodes in the given zone which are owned by the given application. */
@Override
public List<Node> list(ZoneId zone, ApplicationId application) {
Map<HostName, Node> zoneNodes = nodeRepository.getOrDefault(zone, Collections.emptyMap());
return zoneNodes.values()
.stream()
.filter(node -> node.owner().filter(application::equals).isPresent())
.collect(Collectors.toList());
}
/**
 * Upgrades all nodes of the given type in the given zone.
 * Rebuilds each matching node with the new version as the last constructor argument
 * (presumably the wanted version, keeping the current version unchanged — confirm
 * against the Node constructor) and stores it back via add().
 */
@Override
public void upgrade(ZoneId zone, NodeType type, Version version) {
nodeRepository.getOrDefault(zone, Collections.emptyMap()).values()
.stream()
.filter(node -> node.type() == type)
.map(node -> new Node(node.hostname(), node.type(), node.owner(), node.currentVersion(),
version))
.forEach(node -> add(zone, node));
}
} | class NodeRepositoryMock implements NodeRepository {
private final Map<ZoneId, Map<HostName, Node>> nodeRepository = new HashMap<>();
public void add(ZoneId zone, Node node) {
add(zone, Collections.singletonList(node));
}
public void clear() {
nodeRepository.clear();
}
@Override
public List<Node> list(ZoneId zone, ApplicationId application) {
return nodeRepository.getOrDefault(zone, Collections.emptyMap()).values().stream()
.filter(node -> node.owner().map(application::equals).orElse(false))
.collect(Collectors.toList());
}
@Override
public void upgrade(ZoneId zone, NodeType type, Version version) {
nodeRepository.getOrDefault(zone, Collections.emptyMap()).values()
.stream()
.filter(node -> node.type() == type)
.map(node -> new Node(node.hostname(), node.type(), node.owner(), node.currentVersion(),
version))
.forEach(node -> add(zone, node));
}
} |
(Why LinkedList?) | public List<String> toRules(IPVersion ipVersion) {
// ArrayList instead of LinkedList (review: "Why LinkedList?"): the rule count is known up
// front — a handful of fixed rules plus at most one per trusted node — so presize it and
// avoid per-element linked-node overhead. Fully qualified to avoid touching file imports.
List<String> rules = new java.util.ArrayList<>(trustedNodes.size() + 8);
// Default-accept policies; filtering happens via the INPUT rules below
rules.add("-P INPUT ACCEPT");
rules.add("-P FORWARD ACCEPT");
rules.add("-P OUTPUT ACCEPT");
// Always allow established traffic, loopback, and ICMP for this address family
rules.add("-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT");
rules.add("-A INPUT -i lo -j ACCEPT");
rules.add("-A INPUT -p " + ipVersion.icmpProtocol() + " -j ACCEPT");
// Open the trusted ports, if any (note: sorted as strings, i.e. lexicographically)
String commaSeparatedPorts = trustedPorts.stream().map(i -> Integer.toString(i)).sorted().collect(Collectors.joining(","));
if (!commaSeparatedPorts.isEmpty())
rules.add("-A INPUT -p tcp -m multiport --dports " + commaSeparatedPorts + " -j ACCEPT");
// Accept traffic from each trusted node matching this address family, in sorted rule order
trustedNodes.stream()
.filter(ipVersion::match)
.map(ipAddress -> "-A INPUT -s " + InetAddresses.toAddrString(ipAddress) + ipVersion.singleHostCidr() + " -j ACCEPT")
.sorted()
.forEach(rules::add);
// Everything else is rejected (not dropped) so senders fail fast
rules.add("-A INPUT -j REJECT --reject-with " + ipVersion.icmpPortUnreachable());
return Collections.unmodifiableList(rules);
} | List<String> rules = new LinkedList<>(); | public List<String> toRules(IPVersion ipVersion) {
List<String> rules = new LinkedList<>();
rules.add("-P INPUT ACCEPT");
rules.add("-P FORWARD ACCEPT");
rules.add("-P OUTPUT ACCEPT");
rules.add( "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT");
rules.add("-A INPUT -i lo -j ACCEPT");
rules.add("-A INPUT -p " + ipVersion.icmpProtocol() + " -j ACCEPT");
String commaSeparatedPorts = trustedPorts.stream().map(i -> Integer.toString(i)).sorted().collect(Collectors.joining(","));
if (!commaSeparatedPorts.isEmpty())
rules.add("-A INPUT -p tcp -m multiport --dports " + commaSeparatedPorts + " -j ACCEPT");
trustedNodes.stream()
.filter(ipVersion::match)
.map(ipAddress -> "-A INPUT -s " + InetAddresses.toAddrString(ipAddress) + ipVersion.singleHostCidr() + " -j ACCEPT")
.sorted()
.forEach(rules::add);
rules.add("-A INPUT -j REJECT --reject-with " + ipVersion.icmpPortUnreachable());
return Collections.unmodifiableList(rules);
} | class Acl {
private final List<InetAddress> trustedNodes;
private final List<Integer> trustedPorts;
/**
* @param trustedPorts Ports that hostname should trust
* @param trustedNodes Other hostnames that this hostname should trust
*/
public Acl(List<Integer> trustedPorts, List<InetAddress> trustedNodes) {
this.trustedNodes = trustedNodes != null ? ImmutableList.copyOf(trustedNodes) : Collections.emptyList();
this.trustedPorts = trustedPorts != null ? ImmutableList.copyOf(trustedPorts) : Collections.emptyList();
}
/** Two ACLs are equal when they trust the same ports and the same nodes, in the same order. */
@Override
public boolean equals(Object o) {
if (o == this) return true;
if (o == null) return false;
if (getClass() != o.getClass()) return false;
Acl other = (Acl) o;
return Objects.equals(trustedPorts, other.trustedPorts)
&& Objects.equals(trustedNodes, other.trustedNodes);
}
@Override
public int hashCode() {
return Objects.hash(trustedPorts, trustedNodes);
}
} | class Acl {
private final List<InetAddress> trustedNodes;
private final List<Integer> trustedPorts;
/**
* @param trustedPorts Ports that hostname should trust
* @param trustedNodes Other hostnames that this hostname should trust
*/
public Acl(List<Integer> trustedPorts, List<InetAddress> trustedNodes) {
this.trustedNodes = trustedNodes != null ? ImmutableList.copyOf(trustedNodes) : Collections.emptyList();
this.trustedPorts = trustedPorts != null ? ImmutableList.copyOf(trustedPorts) : Collections.emptyList();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Acl that = (Acl) o;
return Objects.equals(trustedPorts, that.trustedPorts) &&
Objects.equals(trustedNodes, that.trustedNodes);
}
@Override
public int hashCode() {
return Objects.hash(trustedPorts, trustedNodes);
}
} |
Because the final list size depends on how many IPs we have, and we only use this list to add elements which works well with LinkedList as we don't need to resize underlying array. Although I see now that calculating final list size is easy, as it is some constant + number of trusted nodes, so could use ArrayList with predefined size. | public List<String> toRules(IPVersion ipVersion) {
List<String> rules = new LinkedList<>();
rules.add("-P INPUT ACCEPT");
rules.add("-P FORWARD ACCEPT");
rules.add("-P OUTPUT ACCEPT");
rules.add( "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT");
rules.add("-A INPUT -i lo -j ACCEPT");
rules.add("-A INPUT -p " + ipVersion.icmpProtocol() + " -j ACCEPT");
String commaSeparatedPorts = trustedPorts.stream().map(i -> Integer.toString(i)).sorted().collect(Collectors.joining(","));
if (!commaSeparatedPorts.isEmpty())
rules.add("-A INPUT -p tcp -m multiport --dports " + commaSeparatedPorts + " -j ACCEPT");
trustedNodes.stream()
.filter(ipVersion::match)
.map(ipAddress -> "-A INPUT -s " + InetAddresses.toAddrString(ipAddress) + ipVersion.singleHostCidr() + " -j ACCEPT")
.sorted()
.forEach(rules::add);
rules.add("-A INPUT -j REJECT --reject-with " + ipVersion.icmpPortUnreachable());
return Collections.unmodifiableList(rules);
} | List<String> rules = new LinkedList<>(); | public List<String> toRules(IPVersion ipVersion) {
List<String> rules = new LinkedList<>();
rules.add("-P INPUT ACCEPT");
rules.add("-P FORWARD ACCEPT");
rules.add("-P OUTPUT ACCEPT");
rules.add( "-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT");
rules.add("-A INPUT -i lo -j ACCEPT");
rules.add("-A INPUT -p " + ipVersion.icmpProtocol() + " -j ACCEPT");
String commaSeparatedPorts = trustedPorts.stream().map(i -> Integer.toString(i)).sorted().collect(Collectors.joining(","));
if (!commaSeparatedPorts.isEmpty())
rules.add("-A INPUT -p tcp -m multiport --dports " + commaSeparatedPorts + " -j ACCEPT");
trustedNodes.stream()
.filter(ipVersion::match)
.map(ipAddress -> "-A INPUT -s " + InetAddresses.toAddrString(ipAddress) + ipVersion.singleHostCidr() + " -j ACCEPT")
.sorted()
.forEach(rules::add);
rules.add("-A INPUT -j REJECT --reject-with " + ipVersion.icmpPortUnreachable());
return Collections.unmodifiableList(rules);
} | class Acl {
private final List<InetAddress> trustedNodes;
private final List<Integer> trustedPorts;
/**
* @param trustedPorts Ports that hostname should trust
* @param trustedNodes Other hostnames that this hostname should trust
*/
public Acl(List<Integer> trustedPorts, List<InetAddress> trustedNodes) {
this.trustedNodes = trustedNodes != null ? ImmutableList.copyOf(trustedNodes) : Collections.emptyList();
this.trustedPorts = trustedPorts != null ? ImmutableList.copyOf(trustedPorts) : Collections.emptyList();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Acl that = (Acl) o;
return Objects.equals(trustedPorts, that.trustedPorts) &&
Objects.equals(trustedNodes, that.trustedNodes);
}
@Override
public int hashCode() {
return Objects.hash(trustedPorts, trustedNodes);
}
} | class Acl {
private final List<InetAddress> trustedNodes;
private final List<Integer> trustedPorts;
/**
* @param trustedPorts Ports that hostname should trust
* @param trustedNodes Other hostnames that this hostname should trust
*/
public Acl(List<Integer> trustedPorts, List<InetAddress> trustedNodes) {
this.trustedNodes = trustedNodes != null ? ImmutableList.copyOf(trustedNodes) : Collections.emptyList();
this.trustedPorts = trustedPorts != null ? ImmutableList.copyOf(trustedPorts) : Collections.emptyList();
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Acl that = (Acl) o;
return Objects.equals(trustedPorts, that.trustedPorts) &&
Objects.equals(trustedNodes, that.trustedNodes);
}
@Override
public int hashCode() {
return Objects.hash(trustedPorts, trustedNodes);
}
} |
Consider logging a warning/severe message if the subscriber has not been closed prior to being closed by finalizer. | protected void finalize() throws Throwable {
try {
// Last-resort safety net: close on finalization if the owner forgot to close.
// NOTE(review): consider logging a warning here when not already closed — reaching
// this path means the subscriber leaked until GC, which is worth surfacing.
if (!isClosed()) {
close();
}
} finally {
// Always chain to Object.finalize(), even if close() throws
super.finalize();
}
} | if (!isClosed()) { | protected void finalize() throws Throwable {
try {
if (!isClosed()) {
close();
}
} finally {
super.finalize();
}
} | class implementing {@link java.lang.Runnable} | class implementing {@link java.lang.Runnable} |
:boom: | public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
} | return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch()); | public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
@Override
public String getTextTo(Mark mark) {
validateMark(mark);
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
return textBuffer.getSubstring(start, end);
}
@Override
public Cursor moveToStartOfBuffer() {
position = textBuffer.getStartOfText();
return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
/**
 * If the text immediately before the cursor equals the given text, moves the cursor
 * back across it and returns true; otherwise leaves the cursor in place and returns false.
 */
@Override
public boolean skipBackward(String text) {
if (!getPrefix().endsWith(text)) {
return false;
}
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
// Fix: a duplicated @Override annotation stood here (a compile error — @Override is not
// repeatable) where moveForwardToStartOfMatch had been elided; restore that method.
/** Moves the cursor to the start of the next match of the pattern, if any. */
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}

/** Moves the cursor to just past the next match of the pattern, if any. */
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(List<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteTo(Mark mark) {
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
textBuffer.delete(start, end);
return this;
}
@Override
public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
}
/** Replaces every match of the pattern forward from the cursor; returns how many were replaced. */
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int replaced = 0;
while (replaceMatch(pattern, replacer)) {
++replaced;
}
return replaced;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
}
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
@Override
public String getTextTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
return textBuffer.getSubstring(start, end);
}
@Override
public Cursor moveToStartOfBuffer() {
position = textBuffer.getStartOfText();
return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(Iterable<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
textBuffer.delete(start, end);
return this;
}
@Override
public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
}
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int count = 0;
for (; replaceMatch(pattern, replacer); ++count) {
}
return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
}
} |
`validateMark(mark);` | public Cursor deleteTo(Mark mark) {
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
textBuffer.delete(start, end);
return this;
} | Position start = mark.position(); | public Cursor deleteTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
textBuffer.delete(start, end);
return this;
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
@Override
public String getTextTo(Mark mark) {
validateMark(mark);
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
return textBuffer.getSubstring(start, end);
}
@Override
public Cursor moveToStartOfBuffer() {
position = textBuffer.getStartOfText();
return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(List<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
@Override
public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
}
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int count = 0;
for (; replaceMatch(pattern, replacer); ++count) {
}
return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
}
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
@Override
public String getTextTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
return textBuffer.getSubstring(start, end);
}
@Override
public Cursor moveToStartOfBuffer() {
position = textBuffer.getStartOfText();
return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(Iterable<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
@Override
public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
}
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int count = 0;
for (; replaceMatch(pattern, replacer); ++count) {
}
return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
}
} |
No need for this comment -- code is clear :) | public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
} | public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
@Override
public String getTextTo(Mark mark) {
validateMark(mark);
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
return textBuffer.getSubstring(start, end);
}
@Override
public Cursor moveToStartOfBuffer() {
position = textBuffer.getStartOfText();
return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(List<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteTo(Mark mark) {
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
textBuffer.delete(start, end);
return this;
}
@Override
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int count = 0;
for (; replaceMatch(pattern, replacer); ++count) {
}
return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
}
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
@Override
public String getTextTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
return textBuffer.getSubstring(start, end);
}
@Override
public Cursor moveToStartOfBuffer() {
position = textBuffer.getStartOfText();
return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(Iterable<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
textBuffer.delete(start, end);
return this;
}
@Override
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int count = 0;
for (; replaceMatch(pattern, replacer); ++count) {
}
return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
}
} | |
Neat! | private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
} | if (mark.secret() != unique) { | private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
@Override
public String getTextTo(Mark mark) {
validateMark(mark);
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
return textBuffer.getSubstring(start, end);
}
@Override
public Cursor moveToStartOfBuffer() {
position = textBuffer.getStartOfText();
return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(List<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteTo(Mark mark) {
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
textBuffer.delete(start, end);
return this;
}
@Override
public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
}
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int count = 0;
for (; replaceMatch(pattern, replacer); ++count) {
}
return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
@Override
public String getTextTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
return textBuffer.getSubstring(start, end);
}
@Override
public Cursor moveToStartOfBuffer() {
position = textBuffer.getStartOfText();
return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(Iterable<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
textBuffer.delete(start, end);
return this;
}
@Override
public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
}
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int count = 0;
for (; replaceMatch(pattern, replacer); ++count) {
}
return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
} |
Interesting ... | private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
} | if (!mark.version().equals(textBuffer.getVersion())) { | private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
@Override
public String getTextTo(Mark mark) {
validateMark(mark);
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
return textBuffer.getSubstring(start, end);
}
@Override
public Cursor moveToStartOfBuffer() {
position = textBuffer.getStartOfText();
return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(List<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteTo(Mark mark) {
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
textBuffer.delete(start, end);
return this;
}
@Override
public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
}
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int count = 0;
for (; replaceMatch(pattern, replacer); ++count) {
}
return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
@Override
public String getTextTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
return textBuffer.getSubstring(start, end);
}
@Override
public Cursor moveToStartOfBuffer() {
position = textBuffer.getStartOfText();
return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(Iterable<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
textBuffer.delete(start, end);
return this;
}
@Override
public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
}
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int count = 0;
for (; replaceMatch(pattern, replacer); ++count) {
}
return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
} |
I'm sure you'd appreciate ``` Position start = min(position, mark.position()); Position end = max(position, mark.position()); ``` given `min` and `max` as you'd expect. | public String getTextTo(Mark mark) {
validateMark(mark);
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
return textBuffer.getSubstring(start, end);
} | } | public String getTextTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
return textBuffer.getSubstring(start, end);
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
@Override
@Override
public Cursor moveToStartOfBuffer() {
position = textBuffer.getStartOfText();
return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(List<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteTo(Mark mark) {
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
textBuffer.delete(start, end);
return this;
}
@Override
public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
}
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int count = 0;
for (; replaceMatch(pattern, replacer); ++count) {
}
return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
}
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
@Override
@Override
public Cursor moveToStartOfBuffer() {
position = textBuffer.getStartOfText();
return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(Iterable<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
textBuffer.delete(start, end);
return this;
}
@Override
public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
}
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int count = 0;
for (; replaceMatch(pattern, replacer); ++count) {
}
return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
}
} |
😋 | public int compareTo(Position that) {
return COMPARATOR.compare(this, that);
} | return COMPARATOR.compare(this, that); | public int compareTo(Position that) {
return COMPARATOR.compare(this, that);
} | class Position implements Comparable<Position> {
private static final Position START_POSITION = new Position(0, 0);
private static final Comparator<Position> COMPARATOR = Comparator
.comparingInt((Position position) -> position.lineIndex())
.thenComparingInt((Position position) -> position.columnIndex());
private final int lineIndex;
private final int columnIndex;
/** Returns the first position at line index 0 and column index 0 */
public static Position start() {
return START_POSITION;
}
Position(int lineIndex, int columnIndex) {
this.lineIndex = lineIndex;
this.columnIndex = columnIndex;
}
public int lineIndex() {
return lineIndex;
}
public int columnIndex() {
return columnIndex;
}
@Override
public boolean isAfter(Position that) { return compareTo(that) > 0; }
public boolean isNotAfter(Position that) { return !isAfter(that); }
public boolean isBefore(Position that) { return compareTo(that) < 0; }
public boolean isNotBefore(Position that) { return !isBefore(that); }
public String coordinateString() {
return "(" + lineIndex + "," + columnIndex + ")";
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Position position = (Position) o;
return lineIndex == position.lineIndex &&
columnIndex == position.columnIndex;
}
@Override
public int hashCode() {
return Objects.hash(lineIndex, columnIndex);
}
@Override
public String toString() {
return coordinateString();
}
} | class Position implements Comparable<Position> {
private static final Position START_POSITION = new Position(0, 0);
private static final Comparator<Position> COMPARATOR = Comparator
.comparingInt(Position::lineIndex)
.thenComparingInt(Position::columnIndex);
private final int lineIndex;
private final int columnIndex;
/** Returns the first position at line index 0 and column index 0 */
public static Position start() {
return START_POSITION;
}
Position(int lineIndex, int columnIndex) {
this.lineIndex = lineIndex;
this.columnIndex = columnIndex;
}
public int lineIndex() {
return lineIndex;
}
public int columnIndex() {
return columnIndex;
}
@Override
public boolean isAfter(Position that) { return compareTo(that) > 0; }
public boolean isNotAfter(Position that) { return !isAfter(that); }
public boolean isBefore(Position that) { return compareTo(that) < 0; }
public boolean isNotBefore(Position that) { return !isBefore(that); }
public String coordinateString() {
return "(" + lineIndex + "," + columnIndex + ")";
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Position position = (Position) o;
return lineIndex == position.lineIndex &&
columnIndex == position.columnIndex;
}
@Override
public int hashCode() {
return Objects.hash(lineIndex, columnIndex);
}
@Override
public String toString() {
return coordinateString();
}
} |
Having missed the `.remove(0)` a few lines up, I though this should be `lineIndex + numberOfLinesToInsert, ...`. Perhaps that's the more readable option anyway? | public Position write(Position position, String text) {
List<String> linesToInsert = splitString(text, true, false);
if (linesToInsert.isEmpty()) {
return position;
}
int lineIndex = position.lineIndex();
String prefix = getLinePrefix(position);
linesToInsert.set(0, prefix + linesToInsert.get(0));
int numberOfLinesToInsert = linesToInsert.size() - 1;
String prefixOfCursorAfterwards = linesToInsert.get(numberOfLinesToInsert);
String suffix = getLineSuffix(position);
String lastLine = prefixOfCursorAfterwards + suffix;
linesToInsert.set(numberOfLinesToInsert, lastLine);
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(lineIndex + linesToInsert.size(), prefixOfCursorAfterwards.length());
} | return new Position(lineIndex + linesToInsert.size(), prefixOfCursorAfterwards.length()); | public Position write(Position position, String text) {
List<String> linesToInsert = new LinkedList<>(splitString(text, true, false));
if (linesToInsert.isEmpty()) {
return position;
}
linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0));
String lastLine = linesToInsert.get(linesToInsert.size() - 1);
int endColumnIndex = lastLine.length();
linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position));
int lineIndex = position.lineIndex();
int endLineIndex = lineIndex + linesToInsert.size() - 1;
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(endLineIndex, endColumnIndex);
} | class TextBufferImpl implements TextBuffer {
private final LinkedList<String> lines = new LinkedList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
lines.add("");
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
@Override
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void deleteLines(int startIndex, int endIndex) {
int fromIndex = endIndex;
for (int toIndex = startIndex; fromIndex <= getMaxLineIndex(); ++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} | class TextBufferImpl implements TextBuffer {
/** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */
private final ArrayList<String> lines = new ArrayList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
this();
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
@Override
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void deleteLines(int startIndex, int endIndex) {
for (int fromIndex = endIndex, toIndex = startIndex; fromIndex <= getMaxLineIndex();
++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} |
Could't this rather be accomplished using the `LinkedList`'s `ListIterator`? ```java for (ListIterator<String> i = lines.listIterator(startIndex); i.hasNext() && startIndex < endIndex; startIndex++) { i.next(); i.remove(); } ``` or just a for-loop with `remove(index)` over indices if a `LinkedHashSet` is used in lieu of the `LinkedList`? | private void deleteLines(int startIndex, int endIndex) {
int fromIndex = endIndex;
for (int toIndex = startIndex; fromIndex <= getMaxLineIndex(); ++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
} | } | private void deleteLines(int startIndex, int endIndex) {
for (int fromIndex = endIndex, toIndex = startIndex; fromIndex <= getMaxLineIndex();
++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
} | class TextBufferImpl implements TextBuffer {
private final LinkedList<String> lines = new LinkedList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
lines.add("");
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
@Override
public Position write(Position position, String text) {
List<String> linesToInsert = splitString(text, true, false);
if (linesToInsert.isEmpty()) {
return position;
}
int lineIndex = position.lineIndex();
String prefix = getLinePrefix(position);
linesToInsert.set(0, prefix + linesToInsert.get(0));
int numberOfLinesToInsert = linesToInsert.size() - 1;
String prefixOfCursorAfterwards = linesToInsert.get(numberOfLinesToInsert);
String suffix = getLineSuffix(position);
String lastLine = prefixOfCursorAfterwards + suffix;
linesToInsert.set(numberOfLinesToInsert, lastLine);
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(lineIndex + linesToInsert.size(), prefixOfCursorAfterwards.length());
}
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} | class TextBufferImpl implements TextBuffer {
/** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */
private final ArrayList<String> lines = new ArrayList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
this();
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
@Override
public Position write(Position position, String text) {
List<String> linesToInsert = new LinkedList<>(splitString(text, true, false));
if (linesToInsert.isEmpty()) {
return position;
}
linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0));
String lastLine = linesToInsert.get(linesToInsert.size() - 1);
int endColumnIndex = lastLine.length();
linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position));
int lineIndex = position.lineIndex();
int endLineIndex = lineIndex + linesToInsert.size() - 1;
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(endLineIndex, endColumnIndex);
}
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} |
Perhaps you should also test writes and deletes in the interior of the buffer as part of the most basic stuff. I see it's tester further down, but only as part of more complex stuff. | public void testBasics() {
assertCursor(0, 0, "");
cursor.write("hello");
assertCursor(0, 5, "hello");
cursor.write("one\ntwo");
assertCursor(1, 3, "helloone\ntwo");
cursor.deleteAll();
assertCursor(0, 0, "");
cursor.moveForward();
assertCursor(0, 0, "");
cursor.writeLine("foo");
assertCursor(1, 0, "foo\n");
cursor.writeLines("one", "two");
assertCursor(3, 0, "foo\none\ntwo\n");
cursor.deleteBackward();
assertCursor(2, 3, "foo\none\ntwo");
cursor.deleteBackward(2);
assertCursor(2, 1, "foo\none\nt");
Mark mark = cursor.createMark();
cursor.moveToStartOfPreviousLine().moveBackward(2);
assertCursor(0, 2, "foo\none\nt");
assertEquals("o\none\nt", cursor.getTextTo(mark));
cursor.deleteTo(mark);
assertCursor(0, 2, "fo");
cursor.deleteBackward(2);
assertCursor(0, 0, "");
cursor.writeLines("one", "two", "three").moveToStartOfBuffer();
assertCursor(0, 0, "one\ntwo\nthree\n");
Pattern pattern = Pattern.compile("t(.)");
Optional<Match> match = cursor.moveForwardToEndOfMatch(pattern);
assertCursor(1, 2, "one\ntwo\nthree\n");
assertTrue(match.isPresent());
assertEquals("tw", match.get().match());
assertEquals("", match.get().prefix());
assertEquals("o", match.get().suffix());
assertEquals(new Position(1, 0), match.get().startOfMatch());
assertEquals(new Position(1, 2), match.get().endOfMatch());
assertEquals(1, match.get().groupCount());
assertEquals("w", match.get().group(1));
match = cursor.moveForwardToEndOfMatch(pattern);
assertCursor(2, 2, "one\ntwo\nthree\n");
assertTrue(match.isPresent());
assertEquals("th", match.get().match());
assertEquals(1, match.get().groupCount());
assertEquals("h", match.get().group(1));
match = cursor.moveForwardToEndOfMatch(pattern);
assertCursor(2, 2, "one\ntwo\nthree\n");
assertFalse(match.isPresent());
assertTrue(cursor.skipBackward("h"));
assertCursor(2, 1, "one\ntwo\nthree\n");
assertFalse(cursor.skipBackward("x"));
assertTrue(cursor.skipForward("hre"));
assertCursor(2, 4, "one\ntwo\nthree\n");
assertFalse(cursor.skipForward("x"));
try {
cursor.moveTo(mark);
fail();
} catch (IllegalArgumentException e) {
}
mark = cursor.createMark();
cursor.moveToStartOfBuffer();
assertEquals(new Position(0, 0), cursor.getPosition());
cursor.moveTo(mark);
assertEquals(new Position(2, 4), cursor.getPosition());
cursor.moveTo(1, 2);
assertCursor(1, 2, "one\ntwo\nthree\n");
cursor.deleteSuffix();
assertCursor(1, 2, "one\ntw\nthree\n");
cursor.deletePrefix();
assertCursor(1, 0, "one\n\nthree\n");
cursor.deleteLine();
assertCursor(1, 0, "one\nthree\n");
cursor.deleteLine();
assertCursor(1, 0, "one\n");
cursor.deleteLine();
assertCursor(1, 0, "one\n");
cursor.moveToStartOfBuffer().moveForward().writeNewlineAfter();
assertCursor(0, 1, "o\nne\n");
cursor.deleteAll().writeLines("one", "two", "three", "four");
cursor.moveToStartOfBuffer().moveToStartOfNextLine();
assertCursor(1, 0, "one\ntwo\nthree\nfour\n");
Pattern pattern2 = Pattern.compile("(o)(.)?");
int count = cursor.replaceMatches(pattern2, m -> {
String prefix = m.group(2) == null ? "" : m.group(2);
return prefix + m.match() + m.group(1);
});
assertCursor(3, 5, "one\ntwoo\nthree\nfuouor\n");
assertEquals(2, count);
cursor.moveToStartOfBuffer().moveToEndOfLine();
Pattern pattern3 = Pattern.compile("o");
count = cursor.replaceMatches(pattern3, m -> "a");
assertEquals(4, count);
assertCursor(3, 5, "one\ntwaa\nthree\nfuauar\n");
} | assertCursor(1, 0, "foo\n"); | public void testBasics() {
assertCursor(0, 0, "");
cursor.write("hello");
assertCursor(0, 5, "hello");
cursor.write("one\ntwo");
assertCursor(1, 3, "helloone\ntwo");
cursor.deleteAll();
assertCursor(0, 0, "");
cursor.moveForward();
assertCursor(0, 0, "");
cursor.writeLine("foo");
assertCursor(1, 0, "foo\n");
cursor.writeLines("one", "two");
assertCursor(3, 0, "foo\none\ntwo\n");
cursor.deleteBackward();
assertCursor(2, 3, "foo\none\ntwo");
cursor.deleteBackward(2);
assertCursor(2, 1, "foo\none\nt");
Mark mark = cursor.createMark();
cursor.moveToStartOfPreviousLine().moveBackward(2);
assertCursor(0, 2, "foo\none\nt");
assertEquals("o\none\nt", cursor.getTextTo(mark));
cursor.deleteTo(mark);
assertCursor(0, 2, "fo");
cursor.deleteBackward(2);
assertCursor(0, 0, "");
cursor.writeLines("one", "two", "three").moveToStartOfBuffer();
assertCursor(0, 0, "one\ntwo\nthree\n");
Pattern pattern = Pattern.compile("t(.)");
Optional<Match> match = cursor.moveForwardToEndOfMatch(pattern);
assertCursor(1, 2, "one\ntwo\nthree\n");
assertTrue(match.isPresent());
assertEquals("tw", match.get().match());
assertEquals("", match.get().prefix());
assertEquals("o", match.get().suffix());
assertEquals(new Position(1, 0), match.get().startOfMatch());
assertEquals(new Position(1, 2), match.get().endOfMatch());
assertEquals(1, match.get().groupCount());
assertEquals("w", match.get().group(1));
match = cursor.moveForwardToEndOfMatch(pattern);
assertCursor(2, 2, "one\ntwo\nthree\n");
assertTrue(match.isPresent());
assertEquals("th", match.get().match());
assertEquals(1, match.get().groupCount());
assertEquals("h", match.get().group(1));
match = cursor.moveForwardToEndOfMatch(pattern);
assertCursor(2, 2, "one\ntwo\nthree\n");
assertFalse(match.isPresent());
assertTrue(cursor.skipBackward("h"));
assertCursor(2, 1, "one\ntwo\nthree\n");
assertFalse(cursor.skipBackward("x"));
assertTrue(cursor.skipForward("hre"));
assertCursor(2, 4, "one\ntwo\nthree\n");
assertFalse(cursor.skipForward("x"));
try {
cursor.moveTo(mark);
fail();
} catch (IllegalArgumentException e) {
}
mark = cursor.createMark();
cursor.moveToStartOfBuffer();
assertEquals(new Position(0, 0), cursor.getPosition());
cursor.moveTo(mark);
assertEquals(new Position(2, 4), cursor.getPosition());
cursor.moveTo(1, 2);
assertCursor(1, 2, "one\ntwo\nthree\n");
cursor.deleteSuffix();
assertCursor(1, 2, "one\ntw\nthree\n");
cursor.deletePrefix();
assertCursor(1, 0, "one\n\nthree\n");
cursor.deleteLine();
assertCursor(1, 0, "one\nthree\n");
cursor.deleteLine();
assertCursor(1, 0, "one\n");
cursor.deleteLine();
assertCursor(1, 0, "one\n");
cursor.moveToStartOfBuffer().moveForward().writeNewlineAfter();
assertCursor(0, 1, "o\nne\n");
cursor.deleteAll().writeLines("one", "two", "three", "four");
cursor.moveToStartOfBuffer().moveToStartOfNextLine();
assertCursor(1, 0, "one\ntwo\nthree\nfour\n");
Pattern pattern2 = Pattern.compile("(o)(.)?");
int count = cursor.replaceMatches(pattern2, m -> {
String prefix = m.group(2) == null ? "" : m.group(2);
return prefix + m.match() + m.group(1);
});
assertCursor(3, 5, "one\ntwoo\nthree\nfuouor\n");
assertEquals(2, count);
cursor.moveToStartOfBuffer().moveToEndOfLine();
Pattern pattern3 = Pattern.compile("o");
count = cursor.replaceMatches(pattern3, m -> "a");
assertEquals(4, count);
assertCursor(3, 5, "one\ntwaa\nthree\nfuauar\n");
} | class StringEditorTest {
private final StringEditor editor = new StringEditor();
private final Cursor cursor = editor.cursor();
@Test
private void assertCursor(int lineIndex, int columnIndex, String text) {
assertEquals(text, cursor.getBufferText());
assertEquals(new Position(lineIndex, columnIndex), cursor.getPosition());
}
} | class StringEditorTest {
private final StringEditor editor = new StringEditor();
private final Cursor cursor = editor.cursor();
@Test
private void assertCursor(int lineIndex, int columnIndex, String text) {
assertEquals(text, cursor.getBufferText());
assertEquals(new Position(lineIndex, columnIndex), cursor.getPosition());
}
} |
Ah, I see this is also tested in the TestBufferImplTest. Then it's not much of a point. | public void testBasics() {
assertCursor(0, 0, "");
cursor.write("hello");
assertCursor(0, 5, "hello");
cursor.write("one\ntwo");
assertCursor(1, 3, "helloone\ntwo");
cursor.deleteAll();
assertCursor(0, 0, "");
cursor.moveForward();
assertCursor(0, 0, "");
cursor.writeLine("foo");
assertCursor(1, 0, "foo\n");
cursor.writeLines("one", "two");
assertCursor(3, 0, "foo\none\ntwo\n");
cursor.deleteBackward();
assertCursor(2, 3, "foo\none\ntwo");
cursor.deleteBackward(2);
assertCursor(2, 1, "foo\none\nt");
Mark mark = cursor.createMark();
cursor.moveToStartOfPreviousLine().moveBackward(2);
assertCursor(0, 2, "foo\none\nt");
assertEquals("o\none\nt", cursor.getTextTo(mark));
cursor.deleteTo(mark);
assertCursor(0, 2, "fo");
cursor.deleteBackward(2);
assertCursor(0, 0, "");
cursor.writeLines("one", "two", "three").moveToStartOfBuffer();
assertCursor(0, 0, "one\ntwo\nthree\n");
Pattern pattern = Pattern.compile("t(.)");
Optional<Match> match = cursor.moveForwardToEndOfMatch(pattern);
assertCursor(1, 2, "one\ntwo\nthree\n");
assertTrue(match.isPresent());
assertEquals("tw", match.get().match());
assertEquals("", match.get().prefix());
assertEquals("o", match.get().suffix());
assertEquals(new Position(1, 0), match.get().startOfMatch());
assertEquals(new Position(1, 2), match.get().endOfMatch());
assertEquals(1, match.get().groupCount());
assertEquals("w", match.get().group(1));
match = cursor.moveForwardToEndOfMatch(pattern);
assertCursor(2, 2, "one\ntwo\nthree\n");
assertTrue(match.isPresent());
assertEquals("th", match.get().match());
assertEquals(1, match.get().groupCount());
assertEquals("h", match.get().group(1));
match = cursor.moveForwardToEndOfMatch(pattern);
assertCursor(2, 2, "one\ntwo\nthree\n");
assertFalse(match.isPresent());
assertTrue(cursor.skipBackward("h"));
assertCursor(2, 1, "one\ntwo\nthree\n");
assertFalse(cursor.skipBackward("x"));
assertTrue(cursor.skipForward("hre"));
assertCursor(2, 4, "one\ntwo\nthree\n");
assertFalse(cursor.skipForward("x"));
try {
cursor.moveTo(mark);
fail();
} catch (IllegalArgumentException e) {
}
mark = cursor.createMark();
cursor.moveToStartOfBuffer();
assertEquals(new Position(0, 0), cursor.getPosition());
cursor.moveTo(mark);
assertEquals(new Position(2, 4), cursor.getPosition());
cursor.moveTo(1, 2);
assertCursor(1, 2, "one\ntwo\nthree\n");
cursor.deleteSuffix();
assertCursor(1, 2, "one\ntw\nthree\n");
cursor.deletePrefix();
assertCursor(1, 0, "one\n\nthree\n");
cursor.deleteLine();
assertCursor(1, 0, "one\nthree\n");
cursor.deleteLine();
assertCursor(1, 0, "one\n");
cursor.deleteLine();
assertCursor(1, 0, "one\n");
cursor.moveToStartOfBuffer().moveForward().writeNewlineAfter();
assertCursor(0, 1, "o\nne\n");
cursor.deleteAll().writeLines("one", "two", "three", "four");
cursor.moveToStartOfBuffer().moveToStartOfNextLine();
assertCursor(1, 0, "one\ntwo\nthree\nfour\n");
Pattern pattern2 = Pattern.compile("(o)(.)?");
int count = cursor.replaceMatches(pattern2, m -> {
String prefix = m.group(2) == null ? "" : m.group(2);
return prefix + m.match() + m.group(1);
});
assertCursor(3, 5, "one\ntwoo\nthree\nfuouor\n");
assertEquals(2, count);
cursor.moveToStartOfBuffer().moveToEndOfLine();
Pattern pattern3 = Pattern.compile("o");
count = cursor.replaceMatches(pattern3, m -> "a");
assertEquals(4, count);
assertCursor(3, 5, "one\ntwaa\nthree\nfuauar\n");
} | assertCursor(1, 0, "foo\n"); | public void testBasics() {
assertCursor(0, 0, "");
cursor.write("hello");
assertCursor(0, 5, "hello");
cursor.write("one\ntwo");
assertCursor(1, 3, "helloone\ntwo");
cursor.deleteAll();
assertCursor(0, 0, "");
cursor.moveForward();
assertCursor(0, 0, "");
cursor.writeLine("foo");
assertCursor(1, 0, "foo\n");
cursor.writeLines("one", "two");
assertCursor(3, 0, "foo\none\ntwo\n");
cursor.deleteBackward();
assertCursor(2, 3, "foo\none\ntwo");
cursor.deleteBackward(2);
assertCursor(2, 1, "foo\none\nt");
Mark mark = cursor.createMark();
cursor.moveToStartOfPreviousLine().moveBackward(2);
assertCursor(0, 2, "foo\none\nt");
assertEquals("o\none\nt", cursor.getTextTo(mark));
cursor.deleteTo(mark);
assertCursor(0, 2, "fo");
cursor.deleteBackward(2);
assertCursor(0, 0, "");
cursor.writeLines("one", "two", "three").moveToStartOfBuffer();
assertCursor(0, 0, "one\ntwo\nthree\n");
Pattern pattern = Pattern.compile("t(.)");
Optional<Match> match = cursor.moveForwardToEndOfMatch(pattern);
assertCursor(1, 2, "one\ntwo\nthree\n");
assertTrue(match.isPresent());
assertEquals("tw", match.get().match());
assertEquals("", match.get().prefix());
assertEquals("o", match.get().suffix());
assertEquals(new Position(1, 0), match.get().startOfMatch());
assertEquals(new Position(1, 2), match.get().endOfMatch());
assertEquals(1, match.get().groupCount());
assertEquals("w", match.get().group(1));
match = cursor.moveForwardToEndOfMatch(pattern);
assertCursor(2, 2, "one\ntwo\nthree\n");
assertTrue(match.isPresent());
assertEquals("th", match.get().match());
assertEquals(1, match.get().groupCount());
assertEquals("h", match.get().group(1));
match = cursor.moveForwardToEndOfMatch(pattern);
assertCursor(2, 2, "one\ntwo\nthree\n");
assertFalse(match.isPresent());
assertTrue(cursor.skipBackward("h"));
assertCursor(2, 1, "one\ntwo\nthree\n");
assertFalse(cursor.skipBackward("x"));
assertTrue(cursor.skipForward("hre"));
assertCursor(2, 4, "one\ntwo\nthree\n");
assertFalse(cursor.skipForward("x"));
try {
cursor.moveTo(mark);
fail();
} catch (IllegalArgumentException e) {
}
mark = cursor.createMark();
cursor.moveToStartOfBuffer();
assertEquals(new Position(0, 0), cursor.getPosition());
cursor.moveTo(mark);
assertEquals(new Position(2, 4), cursor.getPosition());
cursor.moveTo(1, 2);
assertCursor(1, 2, "one\ntwo\nthree\n");
cursor.deleteSuffix();
assertCursor(1, 2, "one\ntw\nthree\n");
cursor.deletePrefix();
assertCursor(1, 0, "one\n\nthree\n");
cursor.deleteLine();
assertCursor(1, 0, "one\nthree\n");
cursor.deleteLine();
assertCursor(1, 0, "one\n");
cursor.deleteLine();
assertCursor(1, 0, "one\n");
cursor.moveToStartOfBuffer().moveForward().writeNewlineAfter();
assertCursor(0, 1, "o\nne\n");
cursor.deleteAll().writeLines("one", "two", "three", "four");
cursor.moveToStartOfBuffer().moveToStartOfNextLine();
assertCursor(1, 0, "one\ntwo\nthree\nfour\n");
Pattern pattern2 = Pattern.compile("(o)(.)?");
int count = cursor.replaceMatches(pattern2, m -> {
String prefix = m.group(2) == null ? "" : m.group(2);
return prefix + m.match() + m.group(1);
});
assertCursor(3, 5, "one\ntwoo\nthree\nfuouor\n");
assertEquals(2, count);
cursor.moveToStartOfBuffer().moveToEndOfLine();
Pattern pattern3 = Pattern.compile("o");
count = cursor.replaceMatches(pattern3, m -> "a");
assertEquals(4, count);
assertCursor(3, 5, "one\ntwaa\nthree\nfuauar\n");
} | class StringEditorTest {
private final StringEditor editor = new StringEditor();
private final Cursor cursor = editor.cursor();
@Test
private void assertCursor(int lineIndex, int columnIndex, String text) {
assertEquals(text, cursor.getBufferText());
assertEquals(new Position(lineIndex, columnIndex), cursor.getPosition());
}
} | class StringEditorTest {
private final StringEditor editor = new StringEditor();
private final Cursor cursor = editor.cursor();
@Test
private void assertCursor(int lineIndex, int columnIndex, String text) {
assertEquals(text, cursor.getBufferText());
assertEquals(new Position(lineIndex, columnIndex), cursor.getPosition());
}
} |
This breaks your style, of course, so adapt if you choose to use it. | private void deleteLines(int startIndex, int endIndex) {
int fromIndex = endIndex;
for (int toIndex = startIndex; fromIndex <= getMaxLineIndex(); ++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
} | } | private void deleteLines(int startIndex, int endIndex) {
for (int fromIndex = endIndex, toIndex = startIndex; fromIndex <= getMaxLineIndex();
++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
} | class TextBufferImpl implements TextBuffer {
private final LinkedList<String> lines = new LinkedList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
lines.add("");
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
@Override
public Position write(Position position, String text) {
List<String> linesToInsert = splitString(text, true, false);
if (linesToInsert.isEmpty()) {
return position;
}
int lineIndex = position.lineIndex();
String prefix = getLinePrefix(position);
linesToInsert.set(0, prefix + linesToInsert.get(0));
int numberOfLinesToInsert = linesToInsert.size() - 1;
String prefixOfCursorAfterwards = linesToInsert.get(numberOfLinesToInsert);
String suffix = getLineSuffix(position);
String lastLine = prefixOfCursorAfterwards + suffix;
linesToInsert.set(numberOfLinesToInsert, lastLine);
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(lineIndex + linesToInsert.size(), prefixOfCursorAfterwards.length());
}
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} | class TextBufferImpl implements TextBuffer {
/** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */
private final ArrayList<String> lines = new ArrayList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
this();
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
@Override
public Position write(Position position, String text) {
List<String> linesToInsert = new LinkedList<>(splitString(text, true, false));
if (linesToInsert.isEmpty()) {
return position;
}
linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0));
String lastLine = linesToInsert.get(linesToInsert.size() - 1);
int endColumnIndex = lastLine.length();
linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position));
int lineIndex = position.lineIndex();
int endLineIndex = lineIndex + linesToInsert.size() - 1;
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(endLineIndex, endColumnIndex);
}
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} |
Done | public Cursor deleteTo(Mark mark) {
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
textBuffer.delete(start, end);
return this;
} | Position start = mark.position(); | public Cursor deleteTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
textBuffer.delete(start, end);
return this;
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
@Override
public String getTextTo(Mark mark) {
validateMark(mark);
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
return textBuffer.getSubstring(start, end);
}
@Override
public Cursor moveToStartOfBuffer() {
position = textBuffer.getStartOfText();
return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(List<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
@Override
public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
}
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int count = 0;
for (; replaceMatch(pattern, replacer); ++count) {
}
return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
}
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
@Override
public String getTextTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
return textBuffer.getSubstring(start, end);
}
@Override
public Cursor moveToStartOfBuffer() {
position = textBuffer.getStartOfText();
return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(Iterable<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
@Override
public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
}
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int count = 0;
for (; replaceMatch(pattern, replacer); ++count) {
}
return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
}
} |
Done | public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
} | public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
@Override
public String getTextTo(Mark mark) {
validateMark(mark);
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
return textBuffer.getSubstring(start, end);
}
@Override
public Cursor moveToStartOfBuffer() {
position = textBuffer.getStartOfText();
return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(List<String> lines) {
return writeLine(String.join("\n", lines));
}
/** Inserts a newline at the cursor; the cursor ends up after it. */
@Override
public Cursor writeNewline() {
return write("\n");
}
/** Inserts a newline at the cursor but leaves the cursor before it (write, then step back). */
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
/** Empties the whole buffer and parks the cursor at the start. */
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
/** Deletes the entire current line (including its newline); cursor ends at the line start. */
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
/** Deletes everything on the current line before the cursor; the cursor lands at the line start. */
@Override
public Cursor deletePrefix() {
    // Remember where the prefix ends, then jump to the line start and delete up to it.
    Position end = position;
    moveToStartOfLine();
    textBuffer.delete(position, end);
    return this;
}
/** Deletes everything on the current line after the cursor; the cursor does not move. */
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
/** Deletes one position forward of the cursor. */
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
/** Deletes the next {@code times} positions; the cursor itself does not move. */
@Override
public Cursor deleteForward(int times) {
    // The end of the deleted region is `times` steps ahead of the cursor.
    textBuffer.delete(position, textBuffer.forward(position, times));
    return this;
}
/** Deletes one position backward of the cursor. */
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
/** Deletes the {@code times} positions before the cursor; the cursor lands where the deletion starts. */
@Override
public Cursor deleteBackward(int times) {
Position end = position;
// moveBackward mutates `position`, so the delete range is [new position, old position).
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
/**
 * Deletes the text between the cursor and {@code mark}, in either direction.
 *
 * @throws IllegalArgumentException if the mark belongs to another cursor or is outdated
 */
@Override
public Cursor deleteTo(Mark mark) {
    // Fix: validate the mark, for consistency with moveTo(Mark) — previously a
    // foreign or stale mark would be deleted to without any check.
    validateMark(mark);
    Position start = mark.position();
    Position end = position;
    if (start.isAfter(end)) {
        // The mark may lie on either side of the cursor; normalize to start <= end.
        Position tmp = start;
        start = end;
        end = tmp;
    }
    textBuffer.delete(start, end);
    return this;
}
/**
 * Replaces every forward match of {@code pattern} using {@code replacer}.
 *
 * @return the number of matches replaced
 */
// Fix: the annotation was duplicated (`@Override` twice), which does not compile —
// @Override is not a repeatable annotation.
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
    int count = 0;
    while (replaceMatch(pattern, replacer)) {
        ++count;
    }
    return count;
}
/**
 * Checks that a position lies inside the buffer.
 * The column may equal the line length (cursor sits just past the last character).
 *
 * @throws IndexOutOfBoundsException if the line or column is out of range
 */
private void validatePosition(Position position) {
    validateLineIndex(position.lineIndex());
    int column = position.columnIndex();
    int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
    if (column < 0 || column > maxColumnIndex) {
        throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
                " is not in permitted range [0," + maxColumnIndex + "]");
    }
}
/**
 * Checks that a line index lies inside the buffer.
 *
 * @throws IndexOutOfBoundsException if the index is negative or past the last line
 */
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
/**
 * Checks that a mark was created by this cursor and is still current.
 *
 * @throws IllegalArgumentException if the mark is foreign or outdated
 */
private void validateMark(Mark mark) {
// Identity check: a mark carries the creating cursor's private token, so marks
// from other cursors are rejected.
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
// Marks go stale whenever the buffer version changes, i.e. after any edit.
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
}
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
@Override
public String getTextTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
return textBuffer.getSubstring(start, end);
}
@Override
public Cursor moveToStartOfBuffer() {
position = textBuffer.getStartOfText();
return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(Iterable<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
textBuffer.delete(start, end);
return this;
}
/**
 * Replaces every forward match of {@code pattern} using {@code replacer}.
 *
 * @return the number of matches replaced
 */
// Fix: the annotation was duplicated (`@Override` twice), which does not compile —
// @Override is not a repeatable annotation.
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
    int count = 0;
    while (replaceMatch(pattern, replacer)) {
        ++count;
    }
    return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
}
} | |
Agreed. Unfortunately, there are no such min/max methods on Comparable in base Java, so I added such a utility (instead of requiring an additional library). | public String getTextTo(Mark mark) {
validateMark(mark);
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
return textBuffer.getSubstring(start, end);
} | } | public String getTextTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
return textBuffer.getSubstring(start, end);
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
/** Moves the cursor to the very first position of the buffer. */
// Fix: the annotation was duplicated (`@Override` twice), which does not compile.
@Override
public Cursor moveToStartOfBuffer() {
    position = textBuffer.getStartOfText();
    return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(List<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
/**
 * Deletes the text between the cursor and {@code mark}, in either direction.
 *
 * @throws IllegalArgumentException if the mark belongs to another cursor or is outdated
 */
@Override
public Cursor deleteTo(Mark mark) {
    // Fix: validate the mark, for consistency with moveTo(Mark) — previously a
    // foreign or stale mark would be deleted to without any check.
    validateMark(mark);
    Position start = mark.position();
    Position end = position;
    if (start.isAfter(end)) {
        // The mark may lie on either side of the cursor; normalize to start <= end.
        Position tmp = start;
        start = end;
        end = tmp;
    }
    textBuffer.delete(start, end);
    return this;
}
@Override
public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
}
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int count = 0;
for (; replaceMatch(pattern, replacer); ++count) {
}
return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
}
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
/** Moves the cursor to the very first position of the buffer. */
// Fix: the annotation was duplicated (`@Override` twice), which does not compile.
@Override
public Cursor moveToStartOfBuffer() {
    position = textBuffer.getStartOfText();
    return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(Iterable<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
textBuffer.delete(start, end);
return this;
}
@Override
public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
}
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int count = 0;
for (; replaceMatch(pattern, replacer); ++count) {
}
return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
}
} |
For some reason I was thinking ArrayList - Done | private void deleteLines(int startIndex, int endIndex) {
int fromIndex = endIndex;
for (int toIndex = startIndex; fromIndex <= getMaxLineIndex(); ++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
} | } | private void deleteLines(int startIndex, int endIndex) {
for (int fromIndex = endIndex, toIndex = startIndex; fromIndex <= getMaxLineIndex();
++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
} | class TextBufferImpl implements TextBuffer {
private final LinkedList<String> lines = new LinkedList<>();
private Version version = new Version();
/** Creates an empty buffer. Invariant: the line list always holds at least one (possibly empty) line. */
TextBufferImpl() {
lines.add("");
}
/** Creates a buffer holding {@code text}, with a fresh initial version. */
TextBufferImpl(String text) {
    // Fix: delegate to the no-arg constructor instead of duplicating the
    // at-least-one-line invariant (`lines.add("")`).
    this();
    write(getStartOfText(), text);
    // write() bumped the version; a freshly constructed buffer should start at a
    // clean initial version regardless of its content.
    version = new Version();
}
/** Returns the buffer's current version; bumped on every mutation (see incrementVersion). */
@Override
public Version getVersion() {
return version;
}
/** Returns the whole buffer as one string with lines joined by '\n'. */
@Override
public String getString() {
return String.join("\n", lines);
}
/** Returns the index of the last line; never negative since the buffer always has at least one line. */
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
/** Returns the line at {@code lineIndex} without its newline. */
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
/**
 * Inserts {@code text} at {@code position} and returns the position just past the
 * inserted text (before the preserved remainder of the original line).
 */
@Override
public Position write(Position position, String text) {
// Split the text into lines; assumes splitString(text, true, false) keeps empty
// segments so blank lines survive -- TODO confirm the flag semantics.
List<String> linesToInsert = splitString(text, true, false);
if (linesToInsert.isEmpty()) {
return position;
}
int lineIndex = position.lineIndex();
// Glue the first inserted line onto the text left of the insertion point...
String prefix = getLinePrefix(position);
linesToInsert.set(0, prefix + linesToInsert.get(0));
int numberOfLinesToInsert = linesToInsert.size() - 1;
String prefixOfCursorAfterwards = linesToInsert.get(numberOfLinesToInsert);
// ...and the last inserted line onto the text right of the insertion point.
String suffix = getLineSuffix(position);
String lastLine = prefixOfCursorAfterwards + suffix;
linesToInsert.set(numberOfLinesToInsert, lastLine);
// Replace the original line with the first spliced line, then insert the rest below it.
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
// The resulting position is at the end of the inserted text.
return new Position(lineIndex + linesToInsert.size(), prefixOfCursorAfterwards.length());
}
/** Empties the buffer back to a single empty line and advances the version. */
@Override
public void clear() {
    lines.clear();
    lines.add("");  // restore the at-least-one-line invariant
    // Fix: clearing is a mutation just like write()/delete(), so it must advance
    // the version; otherwise marks created before clear() would still validate
    // against the now-empty buffer.
    incrementVersion();
}
/**
 * Deletes the text between {@code start} and {@code end}.
 *
 * @throws IllegalArgumentException if {@code start} is after {@code end}
 */
@Override
public void delete(Position start, Position end) {
    if (start.isAfter(end)) {
        throw new IllegalArgumentException("start position " + start +
                " is after end position " + end);
    }
    // Stitch what precedes the deletion on the first line to what follows it on the
    // last line... (fix: local was misspelled "stichedLine")
    String prefix = getLinePrefix(start);
    String suffix = getLineSuffix(end);
    String stitchedLine = prefix + suffix;
    lines.set(start.lineIndex(), stitchedLine);
    // ...then drop the lines that were wholly or partly consumed by the deletion.
    deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
    incrementVersion();
}
/** Drops trailing lines until the last line index equals {@code newMaxLineIndex}. */
private void truncate(int newMaxLineIndex) {
    // Remove from the end so earlier indices stay stable.
    while (lines.size() - 1 > newMaxLineIndex) {
        lines.remove(lines.size() - 1);
    }
}
/** Advances the buffer version; called after every mutation so outstanding marks go stale. */
private void incrementVersion() {
version = version.next();
}
} | class TextBufferImpl implements TextBuffer {
/** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */
private final ArrayList<String> lines = new ArrayList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
this();
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
@Override
public Position write(Position position, String text) {
List<String> linesToInsert = new LinkedList<>(splitString(text, true, false));
if (linesToInsert.isEmpty()) {
return position;
}
linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0));
String lastLine = linesToInsert.get(linesToInsert.size() - 1);
int endColumnIndex = lastLine.length();
linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position));
int lineIndex = position.lineIndex();
int endLineIndex = lineIndex + linesToInsert.size() - 1;
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(endLineIndex, endColumnIndex);
}
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} |
refactored method, PTAL | public Position write(Position position, String text) {
List<String> linesToInsert = splitString(text, true, false);
if (linesToInsert.isEmpty()) {
return position;
}
int lineIndex = position.lineIndex();
String prefix = getLinePrefix(position);
linesToInsert.set(0, prefix + linesToInsert.get(0));
int numberOfLinesToInsert = linesToInsert.size() - 1;
String prefixOfCursorAfterwards = linesToInsert.get(numberOfLinesToInsert);
String suffix = getLineSuffix(position);
String lastLine = prefixOfCursorAfterwards + suffix;
linesToInsert.set(numberOfLinesToInsert, lastLine);
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(lineIndex + linesToInsert.size(), prefixOfCursorAfterwards.length());
} | return new Position(lineIndex + linesToInsert.size(), prefixOfCursorAfterwards.length()); | public Position write(Position position, String text) {
List<String> linesToInsert = new LinkedList<>(splitString(text, true, false));
if (linesToInsert.isEmpty()) {
return position;
}
linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0));
String lastLine = linesToInsert.get(linesToInsert.size() - 1);
int endColumnIndex = lastLine.length();
linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position));
int lineIndex = position.lineIndex();
int endLineIndex = lineIndex + linesToInsert.size() - 1;
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(endLineIndex, endColumnIndex);
} | class TextBufferImpl implements TextBuffer {
private final LinkedList<String> lines = new LinkedList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
lines.add("");
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
/** Empties the buffer back to a single empty line. */
// Fix: the annotation was duplicated (`@Override` twice), which does not compile.
// NOTE(review): unlike write()/delete(), clear() does not call incrementVersion(),
// so marks survive a clear — confirm whether that is intended.
@Override
public void clear() {
    lines.clear();
    lines.add("");  // restore the at-least-one-line invariant
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
/** Removes the lines in the half-open index range [startIndex, endIndex). */
private void deleteLines(int startIndex, int endIndex) {
    // Shift the surviving tail [endIndex..max] down by `count`, then chop the leftover tail.
    int count = endIndex - startIndex;
    for (int i = endIndex; i <= getMaxLineIndex(); ++i) {
        lines.set(i - count, lines.get(i));
    }
    truncate(getMaxLineIndex() - count);
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} | class TextBufferImpl implements TextBuffer {
/** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */
private final ArrayList<String> lines = new ArrayList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
this();
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
/** Empties the buffer, restoring the canonical empty state: one empty line. */
@Override
public void clear() {
    // NOTE(review): clear() does not bump the version like write()/delete() do;
    // confirm marks are meant to survive a clear.
    lines.clear();
    lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void deleteLines(int startIndex, int endIndex) {
for (int fromIndex = endIndex, toIndex = startIndex; fromIndex <= getMaxLineIndex();
++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} |
Very good! | public Position write(Position position, String text) {
List<String> linesToInsert = new LinkedList<>(splitString(text, true, false));
if (linesToInsert.isEmpty()) {
return position;
}
linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0));
String lastLine = linesToInsert.get(linesToInsert.size() - 1);
int endColumnIndex = lastLine.length();
linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position));
int lineIndex = position.lineIndex();
int endLineIndex = lineIndex + linesToInsert.size() - 1;
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(endLineIndex, endColumnIndex);
} | return new Position(endLineIndex, endColumnIndex); | public Position write(Position position, String text) {
List<String> linesToInsert = new LinkedList<>(splitString(text, true, false));
if (linesToInsert.isEmpty()) {
return position;
}
linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0));
String lastLine = linesToInsert.get(linesToInsert.size() - 1);
int endColumnIndex = lastLine.length();
linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position));
int lineIndex = position.lineIndex();
int endLineIndex = lineIndex + linesToInsert.size() - 1;
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(endLineIndex, endColumnIndex);
} | class TextBufferImpl implements TextBuffer {
/** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */
private final LinkedList<String> lines = new LinkedList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
this();
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
/** Empties the buffer, restoring the canonical empty state: one empty line. */
@Override
public void clear() {
    // NOTE(review): clear() does not bump the version like write()/delete() do;
    // confirm marks are meant to survive a clear.
    lines.clear();
    lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void deleteLines(int startIndex, int endIndex) {
for (int i = startIndex; i < endIndex; ++i) {
lines.remove(startIndex);
}
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} | class TextBufferImpl implements TextBuffer {
/** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */
private final ArrayList<String> lines = new ArrayList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
this();
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
/** Empties the buffer, restoring the canonical empty state: one empty line. */
@Override
public void clear() {
    // NOTE(review): clear() does not bump the version like write()/delete() do;
    // confirm marks are meant to survive a clear.
    lines.clear();
    lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void deleteLines(int startIndex, int endIndex) {
for (int fromIndex = endIndex, toIndex = startIndex; fromIndex <= getMaxLineIndex();
++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} |
Doesn't this, again, run in `O(nm)`, though, for `m` deletions in a list of `n` elements? That was my main reason for suggesting an iterator. Perhaps I'm just silly :) | private void deleteLines(int startIndex, int endIndex) {
for (int i = startIndex; i < endIndex; ++i) {
lines.remove(startIndex);
}
} | lines.remove(startIndex); | private void deleteLines(int startIndex, int endIndex) {
for (int fromIndex = endIndex, toIndex = startIndex; fromIndex <= getMaxLineIndex();
++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
} | class TextBufferImpl implements TextBuffer {
/** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */
private final LinkedList<String> lines = new LinkedList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
this();
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
/**
 * Inserts {@code text} at {@code position}, splicing multi-line text into the
 * line list, and returns the position immediately after the inserted text.
 */
@Override
public Position write(Position position, String text) {
// Copy into a LinkedList so the head can be removed cheaply below.
List<String> linesToInsert = new LinkedList<>(splitString(text, true, false));
if (linesToInsert.isEmpty()) {
// Nothing to insert: buffer and version stay untouched.
return position;
}
// The first inserted line keeps whatever preceded the insertion point.
linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0));
String lastLine = linesToInsert.get(linesToInsert.size() - 1);
// End column = length of the last inserted fragment, measured before the
// original line suffix is re-attached below.
int endColumnIndex = lastLine.length();
// The last inserted line carries whatever followed the insertion point.
linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position));
int lineIndex = position.lineIndex();
// Computed before remove(0): the first element replaces the existing line.
int endLineIndex = lineIndex + linesToInsert.size() - 1;
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(endLineIndex, endColumnIndex);
}
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} | class TextBufferImpl implements TextBuffer {
/** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */
private final ArrayList<String> lines = new ArrayList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
this();
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
@Override
public Position write(Position position, String text) {
List<String> linesToInsert = new LinkedList<>(splitString(text, true, false));
if (linesToInsert.isEmpty()) {
return position;
}
linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0));
String lastLine = linesToInsert.get(linesToInsert.size() - 1);
int endColumnIndex = lastLine.length();
linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position));
int lineIndex = position.lineIndex();
int endLineIndex = lineIndex + linesToInsert.size() - 1;
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(endLineIndex, endColumnIndex);
}
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} |
Cool! | public String getTextTo(Mark mark) {
validateMark(mark);
Position start = mark.position();
Position end = position;
if (start.isAfter(end)) {
Position tmp = start;
start = end;
end = tmp;
}
return textBuffer.getSubstring(start, end);
} | } | public String getTextTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
return textBuffer.getSubstring(start, end);
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
/** Moves the cursor to the very first position of the text buffer. */
@Override
public Cursor moveToStartOfBuffer() {
    position = textBuffer.getStartOfText();
    return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(List<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
/**
 * Deletes the text between this cursor and {@code mark}, in either direction.
 *
 * @throws IllegalArgumentException if the mark is from another cursor or from
 *         an outdated buffer version
 */
@Override
public Cursor deleteTo(Mark mark) {
    // Validate before deleting anything, consistent with moveTo(Mark) and
    // getTextTo(Mark); without this a stale/foreign mark could delete text.
    validateMark(mark);
    // Normalize the range: the mark may lie on either side of the cursor.
    Position start = mark.position();
    Position end = position;
    if (start.isAfter(end)) {
        Position tmp = start;
        start = end;
        end = tmp;
    }
    textBuffer.delete(start, end);
    return this;
}
@Override
public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
}
/**
 * Replaces every forward match of {@code pattern} from the cursor onward and
 * returns how many replacements were made.
 */
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
    int replaced = 0;
    // replaceMatch advances the cursor past each replacement, so this
    // terminates once no further match is found.
    while (replaceMatch(pattern, replacer)) {
        ++replaced;
    }
    return replaced;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
}
} | class CursorImpl implements Cursor {
private final TextBuffer textBuffer;
private final Object unique = new Object();
private Position position;
/**
* Creates a cursor to a text buffer.
*
* WARNING: The text buffer MUST NOT be accessed outside this cursor. This cursor
* takes sole ownership of the text buffer.
*
* @param textBuffer the text buffer this cursor owns and operates on
*/
CursorImpl(TextBuffer textBuffer) {
this.textBuffer = textBuffer;
position = textBuffer.getStartOfText();
}
@Override
public Position getPosition() {
return position;
}
@Override
public Mark createMark() {
return new Mark(position, textBuffer.getVersion(), unique);
}
@Override
public String getBufferText() {
return textBuffer.getString();
}
@Override
public String getLine() {
return textBuffer.getLine(position);
}
@Override
public String getPrefix() {
return textBuffer.getLinePrefix(position);
}
@Override
public String getSuffix() {
return textBuffer.getLineSuffix(position);
}
/** Moves the cursor to the very first position of the text buffer. */
@Override
public Cursor moveToStartOfBuffer() {
    position = textBuffer.getStartOfText();
    return this;
}
@Override
public Cursor moveToEndOfBuffer() {
position = textBuffer.getEndOfText();
return this;
}
@Override
public Cursor moveToStartOfLine() {
position = textBuffer.getStartOfLine(position);
return this;
}
@Override
public Cursor moveToStartOfPreviousLine() {
position = textBuffer.getStartOfPreviousLine(position);
return this;
}
@Override
public Cursor moveToStartOfNextLine() {
position = textBuffer.getStartOfNextLine(position);
return this;
}
@Override
public Cursor moveToStartOf(int lineIndex) {
validateLineIndex(lineIndex);
position = new Position(lineIndex, 0);
return this;
}
@Override
public Cursor moveToEndOfLine() {
position = textBuffer.getEndOfLine(position);
return this;
}
@Override
public Cursor moveToEndOfPreviousLine() {
return moveToStartOfPreviousLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOfNextLine() {
return moveToStartOfNextLine().moveToEndOfLine();
}
@Override
public Cursor moveToEndOf(int lineIndex) {
return moveToStartOf(lineIndex).moveToEndOfLine();
}
@Override
public Cursor moveForward() {
return moveForward(1);
}
@Override
public Cursor moveForward(int times) {
position = textBuffer.forward(position, times);
return this;
}
@Override
public Cursor moveBackward() {
return moveBackward(1);
}
@Override
public Cursor moveBackward(int times) {
position = textBuffer.backward(position, times);
return this;
}
@Override
public Cursor moveTo(Mark mark) {
validateMark(mark);
position = mark.position();
return this;
}
@Override
public boolean skipBackward(String text) {
String prefix = getPrefix();
if (prefix.endsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() - text.length());
return true;
} else {
return false;
}
}
@Override
public boolean skipForward(String text) {
String suffix = getSuffix();
if (suffix.startsWith(text)) {
position = new Position(position.lineIndex(), position.columnIndex() + text.length());
return true;
} else {
return false;
}
}
@Override
public Optional<Match> moveForwardToStartOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.startOfMatch());
}
@Override
public Optional<Match> moveForwardToEndOfMatch(Pattern pattern) {
return moveForwardToXOfMatch(pattern, match -> position = match.endOfMatch());
}
private Optional<Match> moveForwardToXOfMatch(Pattern pattern, Consumer<Match> callback) {
Optional<Match> match = textBuffer.findForward(position, pattern);
match.ifPresent(callback);
return match;
}
@Override
public Cursor moveTo(Position position) {
validatePosition(position);
this.position = position;
return this;
}
@Override
public Cursor moveTo(int lineIndex, int columnIndex) {
return moveTo(new Position(lineIndex, columnIndex));
}
@Override
public Cursor write(String text) {
position = textBuffer.write(position, text);
return this;
}
@Override
public Cursor writeLine(String line) {
return write(line).write("\n");
}
@Override
public Cursor writeLines(String... lines) {
return writeLines(Arrays.asList(lines));
}
@Override
public Cursor writeLines(Iterable<String> lines) {
return writeLine(String.join("\n", lines));
}
@Override
public Cursor writeNewline() {
return write("\n");
}
@Override
public Cursor writeNewlineAfter() {
return writeNewline().moveBackward();
}
@Override
public Cursor deleteAll() {
moveToStartOfBuffer();
textBuffer.clear();
return this;
}
@Override
public Cursor deleteLine() {
moveToStartOfLine();
textBuffer.delete(position, textBuffer.getStartOfNextLine(position));
return this;
}
@Override
public Cursor deletePrefix() {
Position originalPosition = position;
moveToStartOfLine();
textBuffer.delete(position, originalPosition);
return this;
}
@Override
public Cursor deleteSuffix() {
textBuffer.delete(position, textBuffer.getEndOfLine(position));
return this;
}
@Override
public Cursor deleteForward() {
return deleteForward(1);
}
@Override
public Cursor deleteForward(int times) {
Position end = textBuffer.forward(position, times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteBackward() {
return deleteBackward(1);
}
@Override
public Cursor deleteBackward(int times) {
Position end = position;
moveBackward(times);
textBuffer.delete(position, end);
return this;
}
@Override
public Cursor deleteTo(Mark mark) {
validateMark(mark);
Position start = min(mark.position(), position);
Position end = max(mark.position(), position);
textBuffer.delete(start, end);
return this;
}
@Override
public boolean replaceMatch(Pattern pattern, Function<Match, String> replacer) {
Optional<Match> match = moveForwardToStartOfMatch(pattern);
if (!match.isPresent()) {
return false;
}
textBuffer.delete(match.get().startOfMatch(), match.get().endOfMatch());
write(replacer.apply(match.get()));
return true;
}
@Override
public int replaceMatches(Pattern pattern, Function<Match, String> replacer) {
int count = 0;
for (; replaceMatch(pattern, replacer); ++count) {
}
return count;
}
private void validatePosition(Position position) {
validateLineIndex(position.lineIndex());
int maxColumnIndex = textBuffer.getLine(position.lineIndex()).length();
if (position.columnIndex() < 0 || position.columnIndex() > maxColumnIndex) {
throw new IndexOutOfBoundsException("Column index of " + position.coordinateString() +
" is not in permitted range [0," + maxColumnIndex + "]");
}
}
private void validateLineIndex(int lineIndex) {
int maxLineIndex = textBuffer.getMaxLineIndex();
if (lineIndex < 0 || lineIndex > maxLineIndex) {
throw new IndexOutOfBoundsException("Line index " + lineIndex +
" not in permitted range [0," + maxLineIndex + "]");
}
}
private void validateMark(Mark mark) {
if (mark.secret() != unique) {
throw new IllegalArgumentException("Unknown mark " + mark);
}
if (!mark.version().equals(textBuffer.getVersion())) {
throw new IllegalArgumentException("Mark " + mark + " is outdated");
}
}
} |
That's true. Reverted to former implementation due to using ArrayList, see comment elsewhere. | private void deleteLines(int startIndex, int endIndex) {
for (int i = startIndex; i < endIndex; ++i) {
lines.remove(startIndex);
}
} | lines.remove(startIndex); | private void deleteLines(int startIndex, int endIndex) {
for (int fromIndex = endIndex, toIndex = startIndex; fromIndex <= getMaxLineIndex();
++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
} | class TextBufferImpl implements TextBuffer {
/** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */
private final LinkedList<String> lines = new LinkedList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
this();
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
@Override
public Position write(Position position, String text) {
List<String> linesToInsert = new LinkedList<>(splitString(text, true, false));
if (linesToInsert.isEmpty()) {
return position;
}
linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0));
String lastLine = linesToInsert.get(linesToInsert.size() - 1);
int endColumnIndex = lastLine.length();
linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position));
int lineIndex = position.lineIndex();
int endLineIndex = lineIndex + linesToInsert.size() - 1;
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(endLineIndex, endColumnIndex);
}
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} | class TextBufferImpl implements TextBuffer {
/** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */
private final ArrayList<String> lines = new ArrayList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
this();
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
@Override
public Position write(Position position, String text) {
List<String> linesToInsert = new LinkedList<>(splitString(text, true, false));
if (linesToInsert.isEmpty()) {
return position;
}
linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0));
String lastLine = linesToInsert.get(linesToInsert.size() - 1);
int endColumnIndex = lastLine.length();
linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position));
int lineIndex = position.lineIndex();
int endLineIndex = lineIndex + linesToInsert.size() - 1;
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(endLineIndex, endColumnIndex);
}
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} |
Actually, let me use ArrayList instead of LinkedList. Operations on TextBuffer assume Position, i.e. fast lookup on line index, so there's little to gain from a LinkedList but plenty to lose. An alternative Cursor backend could use a LinkedList iterator as the primary data structure for its operations, and that would probably be more efficient, but would have a low-level TextBuffer-like API which would be quite different. | private void deleteLines(int startIndex, int endIndex) {
int fromIndex = endIndex;
for (int toIndex = startIndex; fromIndex <= getMaxLineIndex(); ++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
} | } | private void deleteLines(int startIndex, int endIndex) {
for (int fromIndex = endIndex, toIndex = startIndex; fromIndex <= getMaxLineIndex();
++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
} | class TextBufferImpl implements TextBuffer {
private final LinkedList<String> lines = new LinkedList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
lines.add("");
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
@Override
public Position write(Position position, String text) {
List<String> linesToInsert = splitString(text, true, false);
if (linesToInsert.isEmpty()) {
return position;
}
int lineIndex = position.lineIndex();
String prefix = getLinePrefix(position);
linesToInsert.set(0, prefix + linesToInsert.get(0));
int numberOfLinesToInsert = linesToInsert.size() - 1;
String prefixOfCursorAfterwards = linesToInsert.get(numberOfLinesToInsert);
String suffix = getLineSuffix(position);
String lastLine = prefixOfCursorAfterwards + suffix;
linesToInsert.set(numberOfLinesToInsert, lastLine);
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(lineIndex + linesToInsert.size(), prefixOfCursorAfterwards.length());
}
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} | class TextBufferImpl implements TextBuffer {
/** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */
private final ArrayList<String> lines = new ArrayList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
this();
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
@Override
public Position write(Position position, String text) {
List<String> linesToInsert = new LinkedList<>(splitString(text, true, false));
if (linesToInsert.isEmpty()) {
return position;
}
linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0));
String lastLine = linesToInsert.get(linesToInsert.size() - 1);
int endColumnIndex = lastLine.length();
linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position));
int lineIndex = position.lineIndex();
int endLineIndex = lineIndex + linesToInsert.size() - 1;
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(endLineIndex, endColumnIndex);
}
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} |
This is neater than the original, anyway :) Mini-nit: I'd declare `fromIndex` in the `for` construct as well, for maximum uniformity! | private void deleteLines(int startIndex, int endIndex) {
int fromIndex = endIndex;
for (int toIndex = startIndex; fromIndex <= getMaxLineIndex(); ++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
} | lines.set(toIndex, lines.get(fromIndex)); | private void deleteLines(int startIndex, int endIndex) {
for (int fromIndex = endIndex, toIndex = startIndex; fromIndex <= getMaxLineIndex();
++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
} | class TextBufferImpl implements TextBuffer {
/** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */
private final ArrayList<String> lines = new ArrayList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
this();
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
@Override
public Position write(Position position, String text) {
List<String> linesToInsert = new LinkedList<>(splitString(text, true, false));
if (linesToInsert.isEmpty()) {
return position;
}
linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0));
String lastLine = linesToInsert.get(linesToInsert.size() - 1);
int endColumnIndex = lastLine.length();
linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position));
int lineIndex = position.lineIndex();
int endLineIndex = lineIndex + linesToInsert.size() - 1;
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(endLineIndex, endColumnIndex);
}
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} | class TextBufferImpl implements TextBuffer {
/** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */
private final ArrayList<String> lines = new ArrayList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
this();
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
@Override
public Position write(Position position, String text) {
List<String> linesToInsert = new LinkedList<>(splitString(text, true, false));
if (linesToInsert.isEmpty()) {
return position;
}
linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0));
String lastLine = linesToInsert.get(linesToInsert.size() - 1);
int endColumnIndex = lastLine.length();
linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position));
int lineIndex = position.lineIndex();
int endLineIndex = lineIndex + linesToInsert.size() - 1;
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(endLineIndex, endColumnIndex);
}
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} |
Yes, I think it's good we had this discussion (offline). | private void deleteLines(int startIndex, int endIndex) {
int fromIndex = endIndex;
for (int toIndex = startIndex; fromIndex <= getMaxLineIndex(); ++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
} | } | private void deleteLines(int startIndex, int endIndex) {
for (int fromIndex = endIndex, toIndex = startIndex; fromIndex <= getMaxLineIndex();
++toIndex, ++fromIndex) {
lines.set(toIndex, lines.get(fromIndex));
}
truncate(getMaxLineIndex() - (endIndex - startIndex));
} | class TextBufferImpl implements TextBuffer {
private final LinkedList<String> lines = new LinkedList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
lines.add("");
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
@Override
public Position write(Position position, String text) {
List<String> linesToInsert = splitString(text, true, false);
if (linesToInsert.isEmpty()) {
return position;
}
int lineIndex = position.lineIndex();
String prefix = getLinePrefix(position);
linesToInsert.set(0, prefix + linesToInsert.get(0));
int numberOfLinesToInsert = linesToInsert.size() - 1;
String prefixOfCursorAfterwards = linesToInsert.get(numberOfLinesToInsert);
String suffix = getLineSuffix(position);
String lastLine = prefixOfCursorAfterwards + suffix;
linesToInsert.set(numberOfLinesToInsert, lastLine);
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(lineIndex + linesToInsert.size(), prefixOfCursorAfterwards.length());
}
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} | class TextBufferImpl implements TextBuffer {
/** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */
private final ArrayList<String> lines = new ArrayList<>();
private Version version = new Version();
TextBufferImpl() {
lines.add("");
}
TextBufferImpl(String text) {
this();
write(getStartOfText(), text);
version = new Version();
}
@Override
public Version getVersion() {
return version;
}
@Override
public String getString() {
return String.join("\n", lines);
}
@Override
public int getMaxLineIndex() {
return lines.size() - 1;
}
@Override
public String getLine(int lineIndex) {
return lines.get(lineIndex);
}
@Override
public Position write(Position position, String text) {
List<String> linesToInsert = new LinkedList<>(splitString(text, true, false));
if (linesToInsert.isEmpty()) {
return position;
}
linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0));
String lastLine = linesToInsert.get(linesToInsert.size() - 1);
int endColumnIndex = lastLine.length();
linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position));
int lineIndex = position.lineIndex();
int endLineIndex = lineIndex + linesToInsert.size() - 1;
lines.set(lineIndex, linesToInsert.remove(0));
lines.addAll(lineIndex + 1, linesToInsert);
incrementVersion();
return new Position(endLineIndex, endColumnIndex);
}
@Override
public void clear() {
lines.clear();
lines.add("");
}
@Override
public void delete(Position start, Position end) {
if (start.isAfter(end)) {
throw new IllegalArgumentException("start position " + start +
" is after end position " + end);
}
String prefix = getLinePrefix(start);
String suffix = getLineSuffix(end);
String stichedLine = prefix + suffix;
lines.set(start.lineIndex(), stichedLine);
deleteLines(start.lineIndex() + 1, end.lineIndex() + 1);
incrementVersion();
}
private void truncate(int newMaxLineIndex) {
while (getMaxLineIndex() > newMaxLineIndex) {
lines.remove(getMaxLineIndex());
}
}
private void incrementVersion() {
version = version.next();
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.