comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Oh, this _is_ the original 😝
private void deleteLines(int startIndex, int endIndex) { int fromIndex = endIndex; for (int toIndex = startIndex; fromIndex <= getMaxLineIndex(); ++toIndex, ++fromIndex) { lines.set(toIndex, lines.get(fromIndex)); } truncate(getMaxLineIndex() - (endIndex - startIndex)); }
lines.set(toIndex, lines.get(fromIndex));
private void deleteLines(int startIndex, int endIndex) { for (int fromIndex = endIndex, toIndex = startIndex; fromIndex <= getMaxLineIndex(); ++toIndex, ++fromIndex) { lines.set(toIndex, lines.get(fromIndex)); } truncate(getMaxLineIndex() - (endIndex - startIndex)); }
class TextBufferImpl implements TextBuffer { /** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */ private final ArrayList<String> lines = new ArrayList<>(); private Version version = new Version(); TextBufferImpl() { lines.add(""); } TextBufferImpl(String text) { this(); write(getStartOfText(), text); version = new Version(); } @Override public Version getVersion() { return version; } @Override public String getString() { return String.join("\n", lines); } @Override public int getMaxLineIndex() { return lines.size() - 1; } @Override public String getLine(int lineIndex) { return lines.get(lineIndex); } @Override public Position write(Position position, String text) { List<String> linesToInsert = new LinkedList<>(splitString(text, true, false)); if (linesToInsert.isEmpty()) { return position; } linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0)); String lastLine = linesToInsert.get(linesToInsert.size() - 1); int endColumnIndex = lastLine.length(); linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position)); int lineIndex = position.lineIndex(); int endLineIndex = lineIndex + linesToInsert.size() - 1; lines.set(lineIndex, linesToInsert.remove(0)); lines.addAll(lineIndex + 1, linesToInsert); incrementVersion(); return new Position(endLineIndex, endColumnIndex); } @Override public void clear() { lines.clear(); lines.add(""); } @Override public void delete(Position start, Position end) { if (start.isAfter(end)) { throw new IllegalArgumentException("start position " + start + " is after end position " + end); } String prefix = getLinePrefix(start); String suffix = getLineSuffix(end); String stichedLine = prefix + suffix; lines.set(start.lineIndex(), stichedLine); deleteLines(start.lineIndex() + 1, end.lineIndex() + 1); incrementVersion(); } private void truncate(int newMaxLineIndex) { while (getMaxLineIndex() > newMaxLineIndex) { lines.remove(getMaxLineIndex()); } } private void incrementVersion() { version = 
version.next(); } }
class TextBufferImpl implements TextBuffer { /** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */ private final ArrayList<String> lines = new ArrayList<>(); private Version version = new Version(); TextBufferImpl() { lines.add(""); } TextBufferImpl(String text) { this(); write(getStartOfText(), text); version = new Version(); } @Override public Version getVersion() { return version; } @Override public String getString() { return String.join("\n", lines); } @Override public int getMaxLineIndex() { return lines.size() - 1; } @Override public String getLine(int lineIndex) { return lines.get(lineIndex); } @Override public Position write(Position position, String text) { List<String> linesToInsert = new LinkedList<>(splitString(text, true, false)); if (linesToInsert.isEmpty()) { return position; } linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0)); String lastLine = linesToInsert.get(linesToInsert.size() - 1); int endColumnIndex = lastLine.length(); linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position)); int lineIndex = position.lineIndex(); int endLineIndex = lineIndex + linesToInsert.size() - 1; lines.set(lineIndex, linesToInsert.remove(0)); lines.addAll(lineIndex + 1, linesToInsert); incrementVersion(); return new Position(endLineIndex, endColumnIndex); } @Override public void clear() { lines.clear(); lines.add(""); } @Override public void delete(Position start, Position end) { if (start.isAfter(end)) { throw new IllegalArgumentException("start position " + start + " is after end position " + end); } String prefix = getLinePrefix(start); String suffix = getLineSuffix(end); String stichedLine = prefix + suffix; lines.set(start.lineIndex(), stichedLine); deleteLines(start.lineIndex() + 1, end.lineIndex() + 1); incrementVersion(); } private void truncate(int newMaxLineIndex) { while (getMaxLineIndex() > newMaxLineIndex) { lines.remove(getMaxLineIndex()); } } private void incrementVersion() { version = 
version.next(); } }
It's just that it now fits the list type 😁
private void deleteLines(int startIndex, int endIndex) { int fromIndex = endIndex; for (int toIndex = startIndex; fromIndex <= getMaxLineIndex(); ++toIndex, ++fromIndex) { lines.set(toIndex, lines.get(fromIndex)); } truncate(getMaxLineIndex() - (endIndex - startIndex)); }
lines.set(toIndex, lines.get(fromIndex));
private void deleteLines(int startIndex, int endIndex) { for (int fromIndex = endIndex, toIndex = startIndex; fromIndex <= getMaxLineIndex(); ++toIndex, ++fromIndex) { lines.set(toIndex, lines.get(fromIndex)); } truncate(getMaxLineIndex() - (endIndex - startIndex)); }
class TextBufferImpl implements TextBuffer { /** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */ private final ArrayList<String> lines = new ArrayList<>(); private Version version = new Version(); TextBufferImpl() { lines.add(""); } TextBufferImpl(String text) { this(); write(getStartOfText(), text); version = new Version(); } @Override public Version getVersion() { return version; } @Override public String getString() { return String.join("\n", lines); } @Override public int getMaxLineIndex() { return lines.size() - 1; } @Override public String getLine(int lineIndex) { return lines.get(lineIndex); } @Override public Position write(Position position, String text) { List<String> linesToInsert = new LinkedList<>(splitString(text, true, false)); if (linesToInsert.isEmpty()) { return position; } linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0)); String lastLine = linesToInsert.get(linesToInsert.size() - 1); int endColumnIndex = lastLine.length(); linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position)); int lineIndex = position.lineIndex(); int endLineIndex = lineIndex + linesToInsert.size() - 1; lines.set(lineIndex, linesToInsert.remove(0)); lines.addAll(lineIndex + 1, linesToInsert); incrementVersion(); return new Position(endLineIndex, endColumnIndex); } @Override public void clear() { lines.clear(); lines.add(""); } @Override public void delete(Position start, Position end) { if (start.isAfter(end)) { throw new IllegalArgumentException("start position " + start + " is after end position " + end); } String prefix = getLinePrefix(start); String suffix = getLineSuffix(end); String stichedLine = prefix + suffix; lines.set(start.lineIndex(), stichedLine); deleteLines(start.lineIndex() + 1, end.lineIndex() + 1); incrementVersion(); } private void truncate(int newMaxLineIndex) { while (getMaxLineIndex() > newMaxLineIndex) { lines.remove(getMaxLineIndex()); } } private void incrementVersion() { version = 
version.next(); } }
class TextBufferImpl implements TextBuffer { /** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */ private final ArrayList<String> lines = new ArrayList<>(); private Version version = new Version(); TextBufferImpl() { lines.add(""); } TextBufferImpl(String text) { this(); write(getStartOfText(), text); version = new Version(); } @Override public Version getVersion() { return version; } @Override public String getString() { return String.join("\n", lines); } @Override public int getMaxLineIndex() { return lines.size() - 1; } @Override public String getLine(int lineIndex) { return lines.get(lineIndex); } @Override public Position write(Position position, String text) { List<String> linesToInsert = new LinkedList<>(splitString(text, true, false)); if (linesToInsert.isEmpty()) { return position; } linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0)); String lastLine = linesToInsert.get(linesToInsert.size() - 1); int endColumnIndex = lastLine.length(); linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position)); int lineIndex = position.lineIndex(); int endLineIndex = lineIndex + linesToInsert.size() - 1; lines.set(lineIndex, linesToInsert.remove(0)); lines.addAll(lineIndex + 1, linesToInsert); incrementVersion(); return new Position(endLineIndex, endColumnIndex); } @Override public void clear() { lines.clear(); lines.add(""); } @Override public void delete(Position start, Position end) { if (start.isAfter(end)) { throw new IllegalArgumentException("start position " + start + " is after end position " + end); } String prefix = getLinePrefix(start); String suffix = getLineSuffix(end); String stichedLine = prefix + suffix; lines.set(start.lineIndex(), stichedLine); deleteLines(start.lineIndex() + 1, end.lineIndex() + 1); incrementVersion(); } private void truncate(int newMaxLineIndex) { while (getMaxLineIndex() > newMaxLineIndex) { lines.remove(getMaxLineIndex()); } } private void incrementVersion() { version = 
version.next(); } }
exactly
private void deleteLines(int startIndex, int endIndex) { int fromIndex = endIndex; for (int toIndex = startIndex; fromIndex <= getMaxLineIndex(); ++toIndex, ++fromIndex) { lines.set(toIndex, lines.get(fromIndex)); } truncate(getMaxLineIndex() - (endIndex - startIndex)); }
lines.set(toIndex, lines.get(fromIndex));
private void deleteLines(int startIndex, int endIndex) { for (int fromIndex = endIndex, toIndex = startIndex; fromIndex <= getMaxLineIndex(); ++toIndex, ++fromIndex) { lines.set(toIndex, lines.get(fromIndex)); } truncate(getMaxLineIndex() - (endIndex - startIndex)); }
class TextBufferImpl implements TextBuffer { /** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */ private final ArrayList<String> lines = new ArrayList<>(); private Version version = new Version(); TextBufferImpl() { lines.add(""); } TextBufferImpl(String text) { this(); write(getStartOfText(), text); version = new Version(); } @Override public Version getVersion() { return version; } @Override public String getString() { return String.join("\n", lines); } @Override public int getMaxLineIndex() { return lines.size() - 1; } @Override public String getLine(int lineIndex) { return lines.get(lineIndex); } @Override public Position write(Position position, String text) { List<String> linesToInsert = new LinkedList<>(splitString(text, true, false)); if (linesToInsert.isEmpty()) { return position; } linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0)); String lastLine = linesToInsert.get(linesToInsert.size() - 1); int endColumnIndex = lastLine.length(); linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position)); int lineIndex = position.lineIndex(); int endLineIndex = lineIndex + linesToInsert.size() - 1; lines.set(lineIndex, linesToInsert.remove(0)); lines.addAll(lineIndex + 1, linesToInsert); incrementVersion(); return new Position(endLineIndex, endColumnIndex); } @Override public void clear() { lines.clear(); lines.add(""); } @Override public void delete(Position start, Position end) { if (start.isAfter(end)) { throw new IllegalArgumentException("start position " + start + " is after end position " + end); } String prefix = getLinePrefix(start); String suffix = getLineSuffix(end); String stichedLine = prefix + suffix; lines.set(start.lineIndex(), stichedLine); deleteLines(start.lineIndex() + 1, end.lineIndex() + 1); incrementVersion(); } private void truncate(int newMaxLineIndex) { while (getMaxLineIndex() > newMaxLineIndex) { lines.remove(getMaxLineIndex()); } } private void incrementVersion() { version = 
version.next(); } }
class TextBufferImpl implements TextBuffer { /** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */ private final ArrayList<String> lines = new ArrayList<>(); private Version version = new Version(); TextBufferImpl() { lines.add(""); } TextBufferImpl(String text) { this(); write(getStartOfText(), text); version = new Version(); } @Override public Version getVersion() { return version; } @Override public String getString() { return String.join("\n", lines); } @Override public int getMaxLineIndex() { return lines.size() - 1; } @Override public String getLine(int lineIndex) { return lines.get(lineIndex); } @Override public Position write(Position position, String text) { List<String> linesToInsert = new LinkedList<>(splitString(text, true, false)); if (linesToInsert.isEmpty()) { return position; } linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0)); String lastLine = linesToInsert.get(linesToInsert.size() - 1); int endColumnIndex = lastLine.length(); linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position)); int lineIndex = position.lineIndex(); int endLineIndex = lineIndex + linesToInsert.size() - 1; lines.set(lineIndex, linesToInsert.remove(0)); lines.addAll(lineIndex + 1, linesToInsert); incrementVersion(); return new Position(endLineIndex, endColumnIndex); } @Override public void clear() { lines.clear(); lines.add(""); } @Override public void delete(Position start, Position end) { if (start.isAfter(end)) { throw new IllegalArgumentException("start position " + start + " is after end position " + end); } String prefix = getLinePrefix(start); String suffix = getLineSuffix(end); String stichedLine = prefix + suffix; lines.set(start.lineIndex(), stichedLine); deleteLines(start.lineIndex() + 1, end.lineIndex() + 1); incrementVersion(); } private void truncate(int newMaxLineIndex) { while (getMaxLineIndex() > newMaxLineIndex) { lines.remove(getMaxLineIndex()); } } private void incrementVersion() { version = 
version.next(); } }
Done
private void deleteLines(int startIndex, int endIndex) { int fromIndex = endIndex; for (int toIndex = startIndex; fromIndex <= getMaxLineIndex(); ++toIndex, ++fromIndex) { lines.set(toIndex, lines.get(fromIndex)); } truncate(getMaxLineIndex() - (endIndex - startIndex)); }
lines.set(toIndex, lines.get(fromIndex));
private void deleteLines(int startIndex, int endIndex) { for (int fromIndex = endIndex, toIndex = startIndex; fromIndex <= getMaxLineIndex(); ++toIndex, ++fromIndex) { lines.set(toIndex, lines.get(fromIndex)); } truncate(getMaxLineIndex() - (endIndex - startIndex)); }
class TextBufferImpl implements TextBuffer { /** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */ private final ArrayList<String> lines = new ArrayList<>(); private Version version = new Version(); TextBufferImpl() { lines.add(""); } TextBufferImpl(String text) { this(); write(getStartOfText(), text); version = new Version(); } @Override public Version getVersion() { return version; } @Override public String getString() { return String.join("\n", lines); } @Override public int getMaxLineIndex() { return lines.size() - 1; } @Override public String getLine(int lineIndex) { return lines.get(lineIndex); } @Override public Position write(Position position, String text) { List<String> linesToInsert = new LinkedList<>(splitString(text, true, false)); if (linesToInsert.isEmpty()) { return position; } linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0)); String lastLine = linesToInsert.get(linesToInsert.size() - 1); int endColumnIndex = lastLine.length(); linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position)); int lineIndex = position.lineIndex(); int endLineIndex = lineIndex + linesToInsert.size() - 1; lines.set(lineIndex, linesToInsert.remove(0)); lines.addAll(lineIndex + 1, linesToInsert); incrementVersion(); return new Position(endLineIndex, endColumnIndex); } @Override public void clear() { lines.clear(); lines.add(""); } @Override public void delete(Position start, Position end) { if (start.isAfter(end)) { throw new IllegalArgumentException("start position " + start + " is after end position " + end); } String prefix = getLinePrefix(start); String suffix = getLineSuffix(end); String stichedLine = prefix + suffix; lines.set(start.lineIndex(), stichedLine); deleteLines(start.lineIndex() + 1, end.lineIndex() + 1); incrementVersion(); } private void truncate(int newMaxLineIndex) { while (getMaxLineIndex() > newMaxLineIndex) { lines.remove(getMaxLineIndex()); } } private void incrementVersion() { version = 
version.next(); } }
class TextBufferImpl implements TextBuffer { /** Invariant: {@code size() >= 1}. An empty text buffer {@code => [""]} */ private final ArrayList<String> lines = new ArrayList<>(); private Version version = new Version(); TextBufferImpl() { lines.add(""); } TextBufferImpl(String text) { this(); write(getStartOfText(), text); version = new Version(); } @Override public Version getVersion() { return version; } @Override public String getString() { return String.join("\n", lines); } @Override public int getMaxLineIndex() { return lines.size() - 1; } @Override public String getLine(int lineIndex) { return lines.get(lineIndex); } @Override public Position write(Position position, String text) { List<String> linesToInsert = new LinkedList<>(splitString(text, true, false)); if (linesToInsert.isEmpty()) { return position; } linesToInsert.set(0, getLinePrefix(position) + linesToInsert.get(0)); String lastLine = linesToInsert.get(linesToInsert.size() - 1); int endColumnIndex = lastLine.length(); linesToInsert.set(linesToInsert.size() - 1, lastLine + getLineSuffix(position)); int lineIndex = position.lineIndex(); int endLineIndex = lineIndex + linesToInsert.size() - 1; lines.set(lineIndex, linesToInsert.remove(0)); lines.addAll(lineIndex + 1, linesToInsert); incrementVersion(); return new Position(endLineIndex, endColumnIndex); } @Override public void clear() { lines.clear(); lines.add(""); } @Override public void delete(Position start, Position end) { if (start.isAfter(end)) { throw new IllegalArgumentException("start position " + start + " is after end position " + end); } String prefix = getLinePrefix(start); String suffix = getLineSuffix(end); String stichedLine = prefix + suffix; lines.set(start.lineIndex(), stichedLine); deleteLines(start.lineIndex() + 1, end.lineIndex() + 1); incrementVersion(); } private void truncate(int newMaxLineIndex) { while (getMaxLineIndex() > newMaxLineIndex) { lines.remove(getMaxLineIndex()); } } private void incrementVersion() { version = 
version.next(); } }
Consider extracting a method for this: `boolean canTrigger(job, application)`.
private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.changeAt(clock.instant()); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; if (change.isPresent()) for (Step step : productionStepsOf(application)) { Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(change, application, job))); if (jobsByCompletion.containsKey(Optional.empty())) { for (JobType job : jobsByCompletion.get(Optional.empty())) { Versions versions = versions(application, change, deploymentFor(application, job)); if (isTested(application, versions)) { if ( completedAt.isPresent() && jobStateIsAmong(application, job, idle) && stepJobs.containsAll(runningProductionJobs(application))) jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); if ( ! alreadyTriggered(application, versions)) testJobs = emptyList(); } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElse(clock.instant())); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! 
at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } } if (testJobs == null) testJobs = testJobs(application, versions(application, application.change(), Optional.empty()), "Testing last changes outside prod", clock.instant()); jobs.addAll(testJobs); }); return jobs; }
&& jobStateIsAmong(application, job, idle)
private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.changeAt(clock.instant()); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; if (change.isPresent()) for (Step step : productionStepsOf(application)) { Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! isComplete(change, application, job)).collect(toList()); if ( ! remainingJobs.isEmpty()) { for (JobType job : remainingJobs) { Versions versions = versions(application, change, deploymentFor(application, job)); if (isTested(application, versions)) { if ( completedAt.isPresent() && jobStateOf(application, job) == idle && stepJobs.containsAll(runningProductionJobs(application))) jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); if ( ! alreadyTriggered(application, versions)) testJobs = emptyList(); } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElse(clock.instant())); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! 
at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } } if (testJobs == null) testJobs = testJobs(application, versions(application, application.change(), Optional.empty()), "Testing last changes outside prod", clock.instant()); jobs.addAll(testJobs); }); return jobs; }
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentOrder order; private final BuildService buildService; public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.order = new DeploymentOrder(controller::system); this.buildService = buildService; } public DeploymentOrder deploymentOrder() { return order; } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional .empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application)) application = application.withChange(application.change().with(applicationVersion)) .withOutstandingChange(Change.empty()); else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application)); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attemps. */ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { buildService.trigger(job); applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(job.jobType, job.triggering))); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. 
*/ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = versions(application, application.change(), deploymentFor(application, jobType)); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty()))); }); } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(run -> targetsMatch(versions, run)); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get())); } private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! 
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && jobStateIsAmong(application, jobType, running, queued); } private boolean jobStateIsAmong(Application application, JobType jobType, JobState... states) { return Arrays.asList(states).contains(buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()))); } /** * Returns the instant when the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ private Optional<Instant> completedAt(Change change, Application application, JobType jobType) { Versions versions = versions(application, change, deploymentFor(application, jobType)); Optional<JobRun> lastSuccess = successOn(application, jobType, versions); if (lastSuccess.isPresent() || ! jobType.isProduction()) return lastSuccess.map(JobRun::at); return deploymentFor(application, jobType) .filter(deployment -> ! 
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .map(Deployment::at); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions); } private Optional<Instant> testedAt(Application application, Versions versions) { Optional<JobRun> testRun = successOn(application, systemTest, versions); Optional<JobRun> stagingRun = successOn(application, stagingTest, versions) .filter(run -> sourcesMatchIfPresent(versions, run)); return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at)) .filter(__ -> testRun.isPresent() && stagingRun.isPresent()); } private boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(run -> targetsMatch(versions, run)) .filter(run -> sourcesMatchIfPresent(versions, run)) .isPresent()); } /** If the given state's sources are present and differ from its targets, returns whether they are equal to those * of the given job run. */ private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) { return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent() || versions.sourcePlatform.equals(jobRun.sourcePlatform())) && ( ! versions.sourceApplication.filter(version -> ! 
version.equals(versions.targetApplication)).isPresent() || versions.sourceApplication.equals(jobRun.sourceApplication())); } private static boolean targetsMatch(Versions versions, JobRun jobRun) { return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application()); } private boolean acceptNewApplicationVersion(Application application) { if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.changeAt(clock.instant()).platform().isPresent(); } private Change remainingChange(Application application) { List<JobType> jobs = productionStepsOf(application).isEmpty() ? jobsOf(testStepsOf(application)) : jobsOf(productionStepsOf(application)); Change change = application.change(); if (jobs.stream().allMatch(job -> completedAt(application.change().withoutApplication(), application, job).isPresent())) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> completedAt(application.change().withoutPlatform(), application, job).isPresent())) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : jobsOf(testStepsOf(application))) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest); if ( ! 
completion.isPresent() && jobStateIsAmong(application, jobType, idle)) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private List<JobType> jobsOf(Collection<Step> steps) { return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList()); } private List<Step> testStepsOf(Application application) { return application.deploymentSpec().steps().isEmpty() ? singletonList(new DeploymentSpec.DeclaredZone(test)) : application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(test) || step.deploysTo(staging)) .collect(toList()); } private List<Step> productionStepsOf(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(prod) || step.zones().isEmpty()) .collect(toList()); } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError) .filter(JobError.outOfCapacity::equals).isPresent(); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication, versions.sourcePlatform, versions.sourceApplication, reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private Versions versions(Application application, Change change, Optional<Deployment> deployment) { return new Versions(targetPlatform(application, change, deployment), targetApplication(application, change, deployment), deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion)); } private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::version), change.platform()) 
.orElse(application.oldestDeployedPlatform() .orElse(controller.systemVersion())); } private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::applicationVersion), change.application()) .orElse(application.oldestDeployedApplication() .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application())); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } private static class Versions { private final Version targetPlatform; private final ApplicationVersion targetApplication; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) { this.targetPlatform = targetPlatform; this.targetApplication = targetApplication; this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; } @Override public String toString() { return String.format("platform %s%s, application %s%s", sourcePlatform.filter(source -> ! 
source.equals(targetPlatform)) .map(source -> source + " -> ").orElse(""), targetPlatform, sourceApplication.filter(source -> ! source.equals(targetApplication)) .map(source -> source.id() + " -> ").orElse(""), targetApplication.id()); } } }
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentOrder order; private final BuildService buildService; public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.order = new DeploymentOrder(controller::system); this.buildService = buildService; } public DeploymentOrder deploymentOrder() { return order; } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional .empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application)) application = application.withChange(application.change().with(applicationVersion)) .withOutstandingChange(Change.empty()); else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application)); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attemps. */ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { buildService.trigger(job); applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(job.jobType, job.triggering))); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. 
*/ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = versions(application, application.change(), deploymentFor(application, jobType)); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty()))); }); } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(run -> targetsMatch(versions, run)); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get())); } private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! 
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && EnumSet.of(running, queued).contains(jobStateOf(application, jobType)); } private JobState jobStateOf(Application application, JobType jobType) { return buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); } /** * Returns whether the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ private boolean isComplete(Change change, Application application, JobType jobType) { Optional<Deployment> existingDeployment = deploymentFor(application, jobType); return successOn(application, jobType, versions(application, change, existingDeployment)).isPresent() || jobType.isProduction() && existingDeployment.map(deployment -> ! 
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .orElse(false); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions); } private Optional<Instant> testedAt(Application application, Versions versions) { Optional<JobRun> testRun = successOn(application, systemTest, versions); Optional<JobRun> stagingRun = successOn(application, stagingTest, versions) .filter(run -> sourcesMatchIfPresent(versions, run)); return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at)) .filter(__ -> testRun.isPresent() && stagingRun.isPresent()); } private boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(run -> targetsMatch(versions, run)) .filter(run -> sourcesMatchIfPresent(versions, run)) .isPresent()); } /** If the given state's sources are present and differ from its targets, returns whether they are equal to those * of the given job run. */ private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) { return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent() || versions.sourcePlatform.equals(jobRun.sourcePlatform())) && ( ! versions.sourceApplication.filter(version -> ! 
version.equals(versions.targetApplication)).isPresent() || versions.sourceApplication.equals(jobRun.sourceApplication())); } private static boolean targetsMatch(Versions versions, JobRun jobRun) { return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application()); } private boolean acceptNewApplicationVersion(Application application) { if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.changeAt(clock.instant()).platform().isPresent(); } private Change remainingChange(Application application) { List<JobType> jobs = productionStepsOf(application).isEmpty() ? jobsOf(testStepsOf(application)) : jobsOf(productionStepsOf(application)); Change change = application.change(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job))) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job))) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : jobsOf(testStepsOf(application))) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest); if ( ! 
completion.isPresent() && jobStateOf(application, jobType) == idle) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private List<JobType> jobsOf(Collection<Step> steps) { return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList()); } private List<Step> testStepsOf(Application application) { return application.deploymentSpec().steps().isEmpty() ? singletonList(new DeploymentSpec.DeclaredZone(test)) : application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(test) || step.deploysTo(staging)) .collect(toList()); } private List<Step> productionStepsOf(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(prod) || step.zones().isEmpty()) .collect(toList()); } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError) .filter(JobError.outOfCapacity::equals).isPresent(); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication, versions.sourcePlatform, versions.sourceApplication, reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private Versions versions(Application application, Change change, Optional<Deployment> deployment) { return new Versions(targetPlatform(application, change, deployment), targetApplication(application, change, deployment), deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion)); } private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::version), change.platform()) 
.orElse(application.oldestDeployedPlatform() .orElse(controller.systemVersion())); } private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::applicationVersion), change.application()) .orElse(application.oldestDeployedApplication() .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application())); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } private static class Versions { private final Version targetPlatform; private final ApplicationVersion targetApplication; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) { this.targetPlatform = targetPlatform; this.targetApplication = targetApplication; this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; } @Override public String toString() { return String.format("platform %s%s, application %s%s", sourcePlatform.filter(source -> ! 
source.equals(targetPlatform)) .map(source -> source + " -> ").orElse(""), targetPlatform, sourceApplication.filter(source -> ! source.equals(targetApplication)) .map(source -> source.id() + " -> ").orElse(""), targetApplication.id()); } } }
There was a reason for this change, it confused both me and others reading the code. It even confuses IntelliJs static analysis.
private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.changeAt(clock.instant()); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; if (change.isPresent()) for (Step step : productionStepsOf(application)) { Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(change, application, job))); if (jobsByCompletion.containsKey(Optional.empty())) { for (JobType job : jobsByCompletion.get(Optional.empty())) { Versions versions = versions(application, change, deploymentFor(application, job)); if (isTested(application, versions)) { if ( completedAt.isPresent() && jobStateIsAmong(application, job, idle) && stepJobs.containsAll(runningProductionJobs(application))) jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); if ( ! alreadyTriggered(application, versions)) testJobs = emptyList(); } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElse(clock.instant())); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! 
at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } } if (testJobs == null) testJobs = testJobs(application, versions(application, application.change(), Optional.empty()), "Testing last changes outside prod", clock.instant()); jobs.addAll(testJobs); }); return jobs; }
if (jobsByCompletion.containsKey(Optional.empty())) {
private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.changeAt(clock.instant()); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; if (change.isPresent()) for (Step step : productionStepsOf(application)) { Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! isComplete(change, application, job)).collect(toList()); if ( ! remainingJobs.isEmpty()) { for (JobType job : remainingJobs) { Versions versions = versions(application, change, deploymentFor(application, job)); if (isTested(application, versions)) { if ( completedAt.isPresent() && jobStateOf(application, job) == idle && stepJobs.containsAll(runningProductionJobs(application))) jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); if ( ! alreadyTriggered(application, versions)) testJobs = emptyList(); } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElse(clock.instant())); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! 
at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } } if (testJobs == null) testJobs = testJobs(application, versions(application, application.change(), Optional.empty()), "Testing last changes outside prod", clock.instant()); jobs.addAll(testJobs); }); return jobs; }
/**
 * Decides which deployment jobs should run for each application, triggers them through the
 * {@link BuildService}, and records job completion reports, maintaining each application's
 * current {@code Change} as it rolls out.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final DeploymentOrder order;
    private final BuildService buildService;

    public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
        Objects.requireNonNull(controller, "controller cannot be null");
        Objects.requireNonNull(curator, "curator cannot be null");
        Objects.requireNonNull(clock, "clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.order = new DeploymentOrder(controller::system);
        this.buildService = buildService;
    }

    public DeploymentOrder deploymentOrder() { return order; }

    /**
     * Records information when a job completes (successfully or not). This information is used when deciding what to
     * trigger next.
     */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).",
                                             report.jobError().map(e -> e.toString() + " error").orElse("success"),
                                             report.jobType(), report.applicationId(), report.projectId()));
        if ( ! applications().get(report.applicationId()).isPresent()) {
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                // The component (build) job is not triggered by this class, so synthesise its triggering run here.
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
                triggering = JobRun.triggering(controller.systemVersion(), applicationVersion,
                                               Optional.empty(), Optional.empty(), "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application))
                        application = application.withChange(application.change().with(applicationVersion))
                                                 .withOutstandingChange(Change.empty());
                    else
                        // Defer the new application version until the current change is done.
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered)
                                        .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() +
                                                                                     " for " + report.applicationId() +
                                                                                     ", but that has neither been triggered nor deployed"));
            }
            application = application.withJobCompletion(report.projectId(), report.jobType(),
                                                        triggering.completion(report.buildNumber(), clock.instant()),
                                                        report.jobError());
            application = application.withChange(remainingChange(application));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /**
     * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
     *
     * Only one job is triggered each run for test jobs, since their environments have limited capacity.
     */
    public long triggerReadyJobs() {
        return computeReadyJobs().stream()
                .collect(partitioningBy(job -> job.jobType().isTest()))
                .entrySet().stream()
                .flatMap(entry -> (entry.getKey()
                        // Test jobs, grouped by type, ordered retries first, then upgrades, then by age; one per type.
                        ? entry.getValue().stream()
                               .sorted(comparing(Job::isRetry)
                                               .thenComparing(Job::applicationUpgrade)
                                               .reversed()
                                               .thenComparing(Job::availableSince))
                               .collect(groupingBy(Job::jobType))
                        // Production jobs, grouped by application; no limit per group.
                        : entry.getValue().stream()
                               .collect(groupingBy(Job::applicationId)))
                        .values().stream()
                        .map(jobs -> (Supplier<Long>) jobs.stream()
                                                          .filter(this::trigger)
                                                          .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /**
     * Attempts to trigger the given job for the given application and returns the outcome.
     *
     * If the build service can not find the given job, or claims it is illegal to trigger it,
     * the project id is removed from the application owning the job, to prevent further trigger attempts.
     */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            buildService.trigger(job);
            applications().lockOrThrow(job.applicationId(), application ->
                    applications().store(application.withJobTriggering(job.jobType, job.triggering)));
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
                // The build service rejects this job; drop the project id so we stop trying.
                applications().lockOrThrow(job.applicationId(), application ->
                        applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = versions(application, application.change(), deploymentFor(application, jobType));
        String reason = "Job triggered manually by " + user;
        // An untested production job is replaced by the test jobs which must succeed first.
        return (jobType.isProduction() && ! isTested(application, versions)
                ? testJobs(application, versions, reason, clock.instant()).stream()
                : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant())))
                .peek(this::trigger)
                .map(Job::jobType).collect(toList());
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.change() + " is already in progress");
            application = application.withChange(change);
            if (change.application().isPresent())
                application = application.withOutstandingChange(Change.empty());
            applications().store(application);
        });
    }

    /** Cancels any platform upgrade of the given application, and any application upgrade as well unless {@code keepApplicationChange}. */
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            applications().store(application.withChange(application.change().application()
                                                                   .filter(__ -> keepApplicationChange)
                                                                   .map(Change::of)
                                                                   .orElse(Change.empty())));
        });
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns the last successful run of the given job whose target versions match the given versions, if any. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(run -> targetsMatch(versions, run));
    }

    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
    }

    /** Returns the greater of the two given optionals, or whichever is present, or empty. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .notPullRequest()
                              .withProjectId()
                              .deploying()
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    /** Returns the production jobs of the given application which are currently running. */
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    private boolean isRunning(Application application, JobType jobType) {
        return ! application.deploymentJobs().statusOf(jobType)
                            .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                            .orElse(false)
               && jobStateIsAmong(application, jobType, running, queued);
    }

    /** Returns whether the build service reports the given job to be in one of the given states. */
    private boolean jobStateIsAmong(Application application, JobType jobType, JobState... states) {
        return Arrays.asList(states).contains(buildService.stateOf(BuildJob.of(application.id(),
                                                                               application.deploymentJobs().projectId().getAsLong(),
                                                                               jobType.jobName())));
    }

    /**
     * Returns the instant when the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
        Versions versions = versions(application, change, deploymentFor(application, jobType));
        Optional<JobRun> lastSuccess = successOn(application, jobType, versions);
        if (lastSuccess.isPresent() || ! jobType.isProduction())
            return lastSuccess.map(JobRun::at);
        return deploymentFor(application, jobType)
                .filter(deployment -> ! isUpgrade(change, deployment) && isDowngrade(application.change(), deployment))
                .map(Deployment::at);
    }

    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    /** Returns whether the given versions have passed testing, or a production job has already been triggered on them. */
    private boolean isTested(Application application, Versions versions) {
        return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions);
    }

    /** Returns the later instant at which both system and staging tests succeeded on the given versions, if both did. */
    private Optional<Instant> testedAt(Application application, Versions versions) {
        Optional<JobRun> testRun = successOn(application, systemTest, versions);
        Optional<JobRun> stagingRun = successOn(application, stagingTest, versions)
                .filter(run -> sourcesMatchIfPresent(versions, run));
        return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at))
                .filter(__ -> testRun.isPresent() && stagingRun.isPresent());
    }

    /** Returns whether any production job has already been triggered on the given versions. */
    private boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(run -> targetsMatch(versions, run))
                                              .filter(run -> sourcesMatchIfPresent(versions, run))
                                              .isPresent());
    }

    /**
     * If the given state's sources are present and differ from its targets, returns whether they are equal to those
     * of the given job run.
     */
    private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) {
        return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent()
                || versions.sourcePlatform.equals(jobRun.sourcePlatform()))
               && ( ! versions.sourceApplication.filter(version -> ! version.equals(versions.targetApplication)).isPresent()
                || versions.sourceApplication.equals(jobRun.sourceApplication()));
    }

    private static boolean targetsMatch(Versions versions, JobRun jobRun) {
        return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application());
    }

    /** Returns whether a newly built application version should replace the application's current change at once. */
    private boolean acceptNewApplicationVersion(Application application) {
        if (application.change().application().isPresent()) return true; // An application change is already rolling out.
        if (application.deploymentJobs().hasFailures()) return true;     // Let a new application version in when jobs are failing.
        return ! application.changeAt(clock.instant()).platform().isPresent(); // Otherwise, only when no platform change is active.
    }

    /** Returns the part of the application's current change which is not yet complete for all its (production) jobs. */
    private Change remainingChange(Application application) {
        List<JobType> jobs = productionStepsOf(application).isEmpty()
                ? jobsOf(testStepsOf(application))
                : jobsOf(productionStepsOf(application));
        Change change = application.change();
        if (jobs.stream().allMatch(job -> completedAt(application.change().withoutApplication(), application, job).isPresent()))
            change = change.withoutPlatform();
        if (jobs.stream().allMatch(job -> completedAt(application.change().withoutPlatform(), application, job).isPresent()))
            change = change.withoutApplication();
        return change;
    }

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : jobsOf(testStepsOf(application))) {
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest);
            if ( ! completion.isPresent() && jobStateIsAmong(application, jobType, idle))
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
        }
        return jobs;
    }

    private List<JobType> jobsOf(Collection<Step> steps) {
        return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList());
    }

    /** Returns the test steps of the deployment spec, or a default system test step when the spec declares none. */
    private List<Step> testStepsOf(Application application) {
        return application.deploymentSpec().steps().isEmpty()
                ? singletonList(new DeploymentSpec.DeclaredZone(test))
                : application.deploymentSpec().steps().stream()
                             .filter(step -> step.deploysTo(test) || step.deploysTo(staging))
                             .collect(toList());
    }

    /** Returns the production steps of the deployment spec; steps without zones are delays. */
    private List<Step> productionStepsOf(Application application) {
        return application.deploymentSpec().steps().stream()
                          .filter(step -> step.deploysTo(prod) || step.zones().isEmpty())
                          .collect(toList());
    }

    /** Creates a deployment job for the given job type, marking it as a retry when it last failed on out of capacity. */
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
                                     .filter(JobError.outOfCapacity::equals).isPresent();
        if (isRetry) reason += "; retrying on out of capacity";
        JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication,
                                              versions.sourcePlatform, versions.sourceApplication,
                                              reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    private Versions versions(Application application, Change change, Optional<Deployment> deployment) {
        return new Versions(targetPlatform(application, change, deployment),
                            targetApplication(application, change, deployment),
                            deployment.map(Deployment::version),
                            deployment.map(Deployment::applicationVersion));
    }

    private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
        return max(deployment.map(Deployment::version), change.platform())
                .orElse(application.oldestDeployedPlatform()
                                   .orElse(controller.systemVersion()));
    }

    private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
        return max(deployment.map(Deployment::applicationVersion), change.application())
                .orElse(application.oldestDeployedApplication()
                                   .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application()));
    }

    /** A triggerable job, with the data needed to trigger it and to order it among other ready jobs. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince; // Instant from which the job was able to run.
        private final boolean isRetry;
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

    /** Target and (optional) source platform and application versions for a job run. */
    private static class Versions {

        private final Version targetPlatform;
        private final ApplicationVersion targetApplication;
        private final Optional<Version> sourcePlatform;
        private final Optional<ApplicationVersion> sourceApplication;

        private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform,
                         Optional<ApplicationVersion> sourceApplication) {
            this.targetPlatform = targetPlatform;
            this.targetApplication = targetApplication;
            this.sourcePlatform = sourcePlatform;
            this.sourceApplication = sourceApplication;
        }

        @Override
        public String toString() {
            return String.format("platform %s%s, application %s%s",
                                 sourcePlatform.filter(source -> ! source.equals(targetPlatform))
                                               .map(source -> source + " -> ").orElse(""),
                                 targetPlatform,
                                 sourceApplication.filter(source -> ! source.equals(targetApplication))
                                                  .map(source -> source.id() + " -> ").orElse(""),
                                 targetApplication.id());
        }

    }

}
/**
 * Decides which deployment jobs should run for each application, triggers them through the
 * {@link BuildService}, and records job completion reports, maintaining each application's
 * current {@code Change} as it rolls out.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final DeploymentOrder order;
    private final BuildService buildService;

    public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
        Objects.requireNonNull(controller, "controller cannot be null");
        Objects.requireNonNull(curator, "curator cannot be null");
        Objects.requireNonNull(clock, "clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.order = new DeploymentOrder(controller::system);
        this.buildService = buildService;
    }

    public DeploymentOrder deploymentOrder() { return order; }

    /**
     * Records information when a job completes (successfully or not). This information is used when deciding what to
     * trigger next.
     */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).",
                                             report.jobError().map(e -> e.toString() + " error").orElse("success"),
                                             report.jobType(), report.applicationId(), report.projectId()));
        if ( ! applications().get(report.applicationId()).isPresent()) {
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                // The component (build) job is not triggered by this class, so synthesise its triggering run here.
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
                triggering = JobRun.triggering(controller.systemVersion(), applicationVersion,
                                               Optional.empty(), Optional.empty(), "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application))
                        application = application.withChange(application.change().with(applicationVersion))
                                                 .withOutstandingChange(Change.empty());
                    else
                        // Defer the new application version until the current change is done.
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered)
                                        .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() +
                                                                                     " for " + report.applicationId() +
                                                                                     ", but that has neither been triggered nor deployed"));
            }
            application = application.withJobCompletion(report.projectId(), report.jobType(),
                                                        triggering.completion(report.buildNumber(), clock.instant()),
                                                        report.jobError());
            application = application.withChange(remainingChange(application));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /**
     * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
     *
     * Only one job is triggered each run for test jobs, since their environments have limited capacity.
     */
    public long triggerReadyJobs() {
        return computeReadyJobs().stream()
                .collect(partitioningBy(job -> job.jobType().isTest()))
                .entrySet().stream()
                .flatMap(entry -> (entry.getKey()
                        // Test jobs, grouped by type, ordered retries first, then upgrades, then by age; one per type.
                        ? entry.getValue().stream()
                               .sorted(comparing(Job::isRetry)
                                               .thenComparing(Job::applicationUpgrade)
                                               .reversed()
                                               .thenComparing(Job::availableSince))
                               .collect(groupingBy(Job::jobType))
                        // Production jobs, grouped by application; no limit per group.
                        : entry.getValue().stream()
                               .collect(groupingBy(Job::applicationId)))
                        .values().stream()
                        .map(jobs -> (Supplier<Long>) jobs.stream()
                                                          .filter(this::trigger)
                                                          .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /**
     * Attempts to trigger the given job for the given application and returns the outcome.
     *
     * If the build service can not find the given job, or claims it is illegal to trigger it,
     * the project id is removed from the application owning the job, to prevent further trigger attempts.
     */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            buildService.trigger(job);
            applications().lockOrThrow(job.applicationId(), application ->
                    applications().store(application.withJobTriggering(job.jobType, job.triggering)));
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
                // The build service rejects this job; drop the project id so we stop trying.
                applications().lockOrThrow(job.applicationId(), application ->
                        applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = versions(application, application.change(), deploymentFor(application, jobType));
        String reason = "Job triggered manually by " + user;
        // An untested production job is replaced by the test jobs which must succeed first.
        return (jobType.isProduction() && ! isTested(application, versions)
                ? testJobs(application, versions, reason, clock.instant()).stream()
                : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant())))
                .peek(this::trigger)
                .map(Job::jobType).collect(toList());
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.change() + " is already in progress");
            application = application.withChange(change);
            if (change.application().isPresent())
                application = application.withOutstandingChange(Change.empty());
            applications().store(application);
        });
    }

    /** Cancels any platform upgrade of the given application, and any application upgrade as well unless {@code keepApplicationChange}. */
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            applications().store(application.withChange(application.change().application()
                                                                   .filter(__ -> keepApplicationChange)
                                                                   .map(Change::of)
                                                                   .orElse(Change.empty())));
        });
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns the last successful run of the given job whose target versions match the given versions, if any. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(run -> targetsMatch(versions, run));
    }

    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
    }

    /** Returns the greater of the two given optionals, or whichever is present, or empty. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .notPullRequest()
                              .withProjectId()
                              .deploying()
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    /** Returns the production jobs of the given application which are currently running. */
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    private boolean isRunning(Application application, JobType jobType) {
        return ! application.deploymentJobs().statusOf(jobType)
                            .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                            .orElse(false)
               && EnumSet.of(running, queued).contains(jobStateOf(application, jobType));
    }

    /** Asks the build service for the current state of the given job. */
    private JobState jobStateOf(Application application, JobType jobType) {
        return buildService.stateOf(BuildJob.of(application.id(),
                                                application.deploymentJobs().projectId().getAsLong(),
                                                jobType.jobName()));
    }

    /**
     * Returns whether the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    private boolean isComplete(Change change, Application application, JobType jobType) {
        Optional<Deployment> existingDeployment = deploymentFor(application, jobType);
        return successOn(application, jobType, versions(application, change, existingDeployment)).isPresent()
               || jobType.isProduction()
                  && existingDeployment.map(deployment -> ! isUpgrade(change, deployment) && isDowngrade(application.change(), deployment))
                                       .orElse(false);
    }

    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    /** Returns whether the given versions have passed testing, or a production job has already been triggered on them. */
    private boolean isTested(Application application, Versions versions) {
        return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions);
    }

    /** Returns the later instant at which both system and staging tests succeeded on the given versions, if both did. */
    private Optional<Instant> testedAt(Application application, Versions versions) {
        Optional<JobRun> testRun = successOn(application, systemTest, versions);
        Optional<JobRun> stagingRun = successOn(application, stagingTest, versions)
                .filter(run -> sourcesMatchIfPresent(versions, run));
        return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at))
                .filter(__ -> testRun.isPresent() && stagingRun.isPresent());
    }

    /** Returns whether any production job has already been triggered on the given versions. */
    private boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(run -> targetsMatch(versions, run))
                                              .filter(run -> sourcesMatchIfPresent(versions, run))
                                              .isPresent());
    }

    /**
     * If the given state's sources are present and differ from its targets, returns whether they are equal to those
     * of the given job run.
     */
    private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) {
        return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent()
                || versions.sourcePlatform.equals(jobRun.sourcePlatform()))
               && ( ! versions.sourceApplication.filter(version -> ! version.equals(versions.targetApplication)).isPresent()
                || versions.sourceApplication.equals(jobRun.sourceApplication()));
    }

    private static boolean targetsMatch(Versions versions, JobRun jobRun) {
        return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application());
    }

    /** Returns whether a newly built application version should replace the application's current change at once. */
    private boolean acceptNewApplicationVersion(Application application) {
        if (application.change().application().isPresent()) return true; // An application change is already rolling out.
        if (application.deploymentJobs().hasFailures()) return true;     // Let a new application version in when jobs are failing.
        return ! application.changeAt(clock.instant()).platform().isPresent(); // Otherwise, only when no platform change is active.
    }

    /** Returns the part of the application's current change which is not yet complete for all its (production) jobs. */
    private Change remainingChange(Application application) {
        List<JobType> jobs = productionStepsOf(application).isEmpty()
                ? jobsOf(testStepsOf(application))
                : jobsOf(productionStepsOf(application));
        Change change = application.change();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job)))
            change = change.withoutPlatform();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job)))
            change = change.withoutApplication();
        return change;
    }

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : jobsOf(testStepsOf(application))) {
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest);
            if ( ! completion.isPresent() && jobStateOf(application, jobType) == idle)
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
        }
        return jobs;
    }

    private List<JobType> jobsOf(Collection<Step> steps) {
        return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList());
    }

    /** Returns the test steps of the deployment spec, or a default system test step when the spec declares none. */
    private List<Step> testStepsOf(Application application) {
        return application.deploymentSpec().steps().isEmpty()
                ? singletonList(new DeploymentSpec.DeclaredZone(test))
                : application.deploymentSpec().steps().stream()
                             .filter(step -> step.deploysTo(test) || step.deploysTo(staging))
                             .collect(toList());
    }

    /** Returns the production steps of the deployment spec; steps without zones are delays. */
    private List<Step> productionStepsOf(Application application) {
        return application.deploymentSpec().steps().stream()
                          .filter(step -> step.deploysTo(prod) || step.zones().isEmpty())
                          .collect(toList());
    }

    /** Creates a deployment job for the given job type, marking it as a retry when it last failed on out of capacity. */
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
                                     .filter(JobError.outOfCapacity::equals).isPresent();
        if (isRetry) reason += "; retrying on out of capacity";
        JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication,
                                              versions.sourcePlatform, versions.sourceApplication,
                                              reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    private Versions versions(Application application, Change change, Optional<Deployment> deployment) {
        return new Versions(targetPlatform(application, change, deployment),
                            targetApplication(application, change, deployment),
                            deployment.map(Deployment::version),
                            deployment.map(Deployment::applicationVersion));
    }

    private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
        return max(deployment.map(Deployment::version), change.platform())
                .orElse(application.oldestDeployedPlatform()
                                   .orElse(controller.systemVersion()));
    }

    private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
        return max(deployment.map(Deployment::applicationVersion), change.application())
                .orElse(application.oldestDeployedApplication()
                                   .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application()));
    }

    /** A triggerable job, with the data needed to trigger it and to order it among other ready jobs. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince; // Instant from which the job was able to run.
        private final boolean isRetry;
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

    /** Target and (optional) source platform and application versions for a job run. */
    private static class Versions {

        private final Version targetPlatform;
        private final ApplicationVersion targetApplication;
        private final Optional<Version> sourcePlatform;
        private final Optional<ApplicationVersion> sourceApplication;

        private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform,
                         Optional<ApplicationVersion> sourceApplication) {
            this.targetPlatform = targetPlatform;
            this.targetApplication = targetApplication;
            this.sourcePlatform = sourcePlatform;
            this.sourceApplication = sourceApplication;
        }

        @Override
        public String toString() {
            return String.format("platform %s%s, application %s%s",
                                 sourcePlatform.filter(source -> ! source.equals(targetPlatform))
                                               .map(source -> source + " -> ").orElse(""),
                                 targetPlatform,
                                 sourceApplication.filter(source -> ! source.equals(targetApplication))
                                                  .map(source -> source.id() + " -> ").orElse(""),
                                 targetApplication.id());
        }

    }

}
Generic type arguments are not inferable here, no, because of `Map.get(Object key)` and `Map.containsKey(Object key)`. However, all `Optional.empty()` instances are equal -- they are _empty_. They hold no value for you to use, and there's no possibility of harm even though IntelliJ complains. As for readability, do you really prefer: 1. Sort jobs by time of completion; if they haven't completed, pretend they completed in the 1970s (or a very long time from now). 2. If any jobs completed in the 1970s (or a very long time from now), they didn't really complete, so then ... to 1. Sort jobs by optional time of completion. 2. If any jobs have no time of completion, then ... I'm absolutely certain I prefer the latter, even though it puts an extra type argument in the code. `Instant` has no value for `never`, and using `the 70s` or `the far distant future` as `never` is not a good thing. `Optional<Instant>`, however, has a perfect value for `never`, namely, `empty`. The keys aren't even forced into this `Optional` -- they are already `Optional`s from where they are computed. I doubt you dispute the return value of that method.
private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.changeAt(clock.instant()); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; if (change.isPresent()) for (Step step : productionStepsOf(application)) { Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(change, application, job))); if (jobsByCompletion.containsKey(Optional.empty())) { for (JobType job : jobsByCompletion.get(Optional.empty())) { Versions versions = versions(application, change, deploymentFor(application, job)); if (isTested(application, versions)) { if ( completedAt.isPresent() && jobStateIsAmong(application, job, idle) && stepJobs.containsAll(runningProductionJobs(application))) jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); if ( ! alreadyTriggered(application, versions)) testJobs = emptyList(); } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElse(clock.instant())); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! 
at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } } if (testJobs == null) testJobs = testJobs(application, versions(application, application.change(), Optional.empty()), "Testing last changes outside prod", clock.instant()); jobs.addAll(testJobs); }); return jobs; }
if (jobsByCompletion.containsKey(Optional.empty())) {
private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.changeAt(clock.instant()); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; if (change.isPresent()) for (Step step : productionStepsOf(application)) { Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! isComplete(change, application, job)).collect(toList()); if ( ! remainingJobs.isEmpty()) { for (JobType job : remainingJobs) { Versions versions = versions(application, change, deploymentFor(application, job)); if (isTested(application, versions)) { if ( completedAt.isPresent() && jobStateOf(application, job) == idle && stepJobs.containsAll(runningProductionJobs(application))) jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); if ( ! alreadyTriggered(application, versions)) testJobs = emptyList(); } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElse(clock.instant())); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! 
at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } } if (testJobs == null) testJobs = testJobs(application, versions(application, application.change(), Optional.empty()), "Testing last changes outside prod", clock.instant()); jobs.addAll(testJobs); }); return jobs; }
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentOrder order; private final BuildService buildService; public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.order = new DeploymentOrder(controller::system); this.buildService = buildService; } public DeploymentOrder deploymentOrder() { return order; } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional .empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application)) application = application.withChange(application.change().with(applicationVersion)) .withOutstandingChange(Change.empty()); else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application)); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attemps. */ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { buildService.trigger(job); applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(job.jobType, job.triggering))); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. 
*/ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = versions(application, application.change(), deploymentFor(application, jobType)); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty()))); }); } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(run -> targetsMatch(versions, run)); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get())); } private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! 
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && jobStateIsAmong(application, jobType, running, queued); } private boolean jobStateIsAmong(Application application, JobType jobType, JobState... states) { return Arrays.asList(states).contains(buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()))); } /** * Returns the instant when the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ private Optional<Instant> completedAt(Change change, Application application, JobType jobType) { Versions versions = versions(application, change, deploymentFor(application, jobType)); Optional<JobRun> lastSuccess = successOn(application, jobType, versions); if (lastSuccess.isPresent() || ! jobType.isProduction()) return lastSuccess.map(JobRun::at); return deploymentFor(application, jobType) .filter(deployment -> ! 
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .map(Deployment::at); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions); } private Optional<Instant> testedAt(Application application, Versions versions) { Optional<JobRun> testRun = successOn(application, systemTest, versions); Optional<JobRun> stagingRun = successOn(application, stagingTest, versions) .filter(run -> sourcesMatchIfPresent(versions, run)); return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at)) .filter(__ -> testRun.isPresent() && stagingRun.isPresent()); } private boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(run -> targetsMatch(versions, run)) .filter(run -> sourcesMatchIfPresent(versions, run)) .isPresent()); } /** If the given state's sources are present and differ from its targets, returns whether they are equal to those * of the given job run. */ private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) { return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent() || versions.sourcePlatform.equals(jobRun.sourcePlatform())) && ( ! versions.sourceApplication.filter(version -> ! 
version.equals(versions.targetApplication)).isPresent() || versions.sourceApplication.equals(jobRun.sourceApplication())); } private static boolean targetsMatch(Versions versions, JobRun jobRun) { return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application()); } private boolean acceptNewApplicationVersion(Application application) { if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.changeAt(clock.instant()).platform().isPresent(); } private Change remainingChange(Application application) { List<JobType> jobs = productionStepsOf(application).isEmpty() ? jobsOf(testStepsOf(application)) : jobsOf(productionStepsOf(application)); Change change = application.change(); if (jobs.stream().allMatch(job -> completedAt(application.change().withoutApplication(), application, job).isPresent())) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> completedAt(application.change().withoutPlatform(), application, job).isPresent())) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : jobsOf(testStepsOf(application))) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest); if ( ! 
completion.isPresent() && jobStateIsAmong(application, jobType, idle)) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private List<JobType> jobsOf(Collection<Step> steps) { return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList()); } private List<Step> testStepsOf(Application application) { return application.deploymentSpec().steps().isEmpty() ? singletonList(new DeploymentSpec.DeclaredZone(test)) : application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(test) || step.deploysTo(staging)) .collect(toList()); } private List<Step> productionStepsOf(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(prod) || step.zones().isEmpty()) .collect(toList()); } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError) .filter(JobError.outOfCapacity::equals).isPresent(); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication, versions.sourcePlatform, versions.sourceApplication, reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private Versions versions(Application application, Change change, Optional<Deployment> deployment) { return new Versions(targetPlatform(application, change, deployment), targetApplication(application, change, deployment), deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion)); } private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::version), change.platform()) 
.orElse(application.oldestDeployedPlatform() .orElse(controller.systemVersion())); } private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::applicationVersion), change.application()) .orElse(application.oldestDeployedApplication() .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application())); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } private static class Versions { private final Version targetPlatform; private final ApplicationVersion targetApplication; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) { this.targetPlatform = targetPlatform; this.targetApplication = targetApplication; this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; } @Override public String toString() { return String.format("platform %s%s, application %s%s", sourcePlatform.filter(source -> ! 
source.equals(targetPlatform)) .map(source -> source + " -> ").orElse(""), targetPlatform, sourceApplication.filter(source -> ! source.equals(targetApplication)) .map(source -> source.id() + " -> ").orElse(""), targetApplication.id()); } } }
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentOrder order; private final BuildService buildService; public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.order = new DeploymentOrder(controller::system); this.buildService = buildService; } public DeploymentOrder deploymentOrder() { return order; } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional .empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application)) application = application.withChange(application.change().with(applicationVersion)) .withOutstandingChange(Change.empty()); else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application)); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attemps. */ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { buildService.trigger(job); applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(job.jobType, job.triggering))); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. 
*/ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = versions(application, application.change(), deploymentFor(application, jobType)); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty()))); }); } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(run -> targetsMatch(versions, run)); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get())); } private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! 
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && EnumSet.of(running, queued).contains(jobStateOf(application, jobType)); } private JobState jobStateOf(Application application, JobType jobType) { return buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); } /** * Returns whether the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ private boolean isComplete(Change change, Application application, JobType jobType) { Optional<Deployment> existingDeployment = deploymentFor(application, jobType); return successOn(application, jobType, versions(application, change, existingDeployment)).isPresent() || jobType.isProduction() && existingDeployment.map(deployment -> ! 
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .orElse(false); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions); } private Optional<Instant> testedAt(Application application, Versions versions) { Optional<JobRun> testRun = successOn(application, systemTest, versions); Optional<JobRun> stagingRun = successOn(application, stagingTest, versions) .filter(run -> sourcesMatchIfPresent(versions, run)); return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at)) .filter(__ -> testRun.isPresent() && stagingRun.isPresent()); } private boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(run -> targetsMatch(versions, run)) .filter(run -> sourcesMatchIfPresent(versions, run)) .isPresent()); } /** If the given state's sources are present and differ from its targets, returns whether they are equal to those * of the given job run. */ private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) { return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent() || versions.sourcePlatform.equals(jobRun.sourcePlatform())) && ( ! versions.sourceApplication.filter(version -> ! 
version.equals(versions.targetApplication)).isPresent() || versions.sourceApplication.equals(jobRun.sourceApplication())); } private static boolean targetsMatch(Versions versions, JobRun jobRun) { return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application()); } private boolean acceptNewApplicationVersion(Application application) { if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.changeAt(clock.instant()).platform().isPresent(); } private Change remainingChange(Application application) { List<JobType> jobs = productionStepsOf(application).isEmpty() ? jobsOf(testStepsOf(application)) : jobsOf(productionStepsOf(application)); Change change = application.change(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job))) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job))) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : jobsOf(testStepsOf(application))) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest); if ( ! 
completion.isPresent() && jobStateOf(application, jobType) == idle) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private List<JobType> jobsOf(Collection<Step> steps) { return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList()); } private List<Step> testStepsOf(Application application) { return application.deploymentSpec().steps().isEmpty() ? singletonList(new DeploymentSpec.DeclaredZone(test)) : application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(test) || step.deploysTo(staging)) .collect(toList()); } private List<Step> productionStepsOf(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(prod) || step.zones().isEmpty()) .collect(toList()); } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError) .filter(JobError.outOfCapacity::equals).isPresent(); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication, versions.sourcePlatform, versions.sourceApplication, reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private Versions versions(Application application, Change change, Optional<Deployment> deployment) { return new Versions(targetPlatform(application, change, deployment), targetApplication(application, change, deployment), deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion)); } private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::version), change.platform()) 
.orElse(application.oldestDeployedPlatform() .orElse(controller.systemVersion())); } private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::applicationVersion), change.application()) .orElse(application.oldestDeployedApplication() .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application())); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } private static class Versions { private final Version targetPlatform; private final ApplicationVersion targetApplication; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) { this.targetPlatform = targetPlatform; this.targetApplication = targetApplication; this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; } @Override public String toString() { return String.format("platform %s%s, application %s%s", sourcePlatform.filter(source -> ! 
source.equals(targetPlatform)) .map(source -> source + " -> ").orElse(""), targetPlatform, sourceApplication.filter(source -> ! source.equals(targetApplication)) .map(source -> source.id() + " -> ").orElse(""), targetApplication.id()); } } }
Sure.
private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.changeAt(clock.instant()); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; if (change.isPresent()) for (Step step : productionStepsOf(application)) { Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(change, application, job))); if (jobsByCompletion.containsKey(Optional.empty())) { for (JobType job : jobsByCompletion.get(Optional.empty())) { Versions versions = versions(application, change, deploymentFor(application, job)); if (isTested(application, versions)) { if ( completedAt.isPresent() && jobStateIsAmong(application, job, idle) && stepJobs.containsAll(runningProductionJobs(application))) jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); if ( ! alreadyTriggered(application, versions)) testJobs = emptyList(); } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElse(clock.instant())); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! 
at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } } if (testJobs == null) testJobs = testJobs(application, versions(application, application.change(), Optional.empty()), "Testing last changes outside prod", clock.instant()); jobs.addAll(testJobs); }); return jobs; }
&& jobStateIsAmong(application, job, idle)
private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.changeAt(clock.instant()); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; if (change.isPresent()) for (Step step : productionStepsOf(application)) { Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! isComplete(change, application, job)).collect(toList()); if ( ! remainingJobs.isEmpty()) { for (JobType job : remainingJobs) { Versions versions = versions(application, change, deploymentFor(application, job)); if (isTested(application, versions)) { if ( completedAt.isPresent() && jobStateOf(application, job) == idle && stepJobs.containsAll(runningProductionJobs(application))) jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); if ( ! alreadyTriggered(application, versions)) testJobs = emptyList(); } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElse(clock.instant())); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! 
at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } } if (testJobs == null) testJobs = testJobs(application, versions(application, application.change(), Optional.empty()), "Testing last changes outside prod", clock.instant()); jobs.addAll(testJobs); }); return jobs; }
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentOrder order; private final BuildService buildService; public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.order = new DeploymentOrder(controller::system); this.buildService = buildService; } public DeploymentOrder deploymentOrder() { return order; } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional .empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application)) application = application.withChange(application.change().with(applicationVersion)) .withOutstandingChange(Change.empty()); else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application)); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attemps. */ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { buildService.trigger(job); applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(job.jobType, job.triggering))); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. 
*/ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = versions(application, application.change(), deploymentFor(application, jobType)); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty()))); }); } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(run -> targetsMatch(versions, run)); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get())); } private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! 
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && jobStateIsAmong(application, jobType, running, queued); } private boolean jobStateIsAmong(Application application, JobType jobType, JobState... states) { return Arrays.asList(states).contains(buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()))); } /** * Returns the instant when the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ private Optional<Instant> completedAt(Change change, Application application, JobType jobType) { Versions versions = versions(application, change, deploymentFor(application, jobType)); Optional<JobRun> lastSuccess = successOn(application, jobType, versions); if (lastSuccess.isPresent() || ! jobType.isProduction()) return lastSuccess.map(JobRun::at); return deploymentFor(application, jobType) .filter(deployment -> ! 
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .map(Deployment::at); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions); } private Optional<Instant> testedAt(Application application, Versions versions) { Optional<JobRun> testRun = successOn(application, systemTest, versions); Optional<JobRun> stagingRun = successOn(application, stagingTest, versions) .filter(run -> sourcesMatchIfPresent(versions, run)); return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at)) .filter(__ -> testRun.isPresent() && stagingRun.isPresent()); } private boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(run -> targetsMatch(versions, run)) .filter(run -> sourcesMatchIfPresent(versions, run)) .isPresent()); } /** If the given state's sources are present and differ from its targets, returns whether they are equal to those * of the given job run. */ private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) { return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent() || versions.sourcePlatform.equals(jobRun.sourcePlatform())) && ( ! versions.sourceApplication.filter(version -> ! 
version.equals(versions.targetApplication)).isPresent() || versions.sourceApplication.equals(jobRun.sourceApplication())); } private static boolean targetsMatch(Versions versions, JobRun jobRun) { return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application()); } private boolean acceptNewApplicationVersion(Application application) { if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.changeAt(clock.instant()).platform().isPresent(); } private Change remainingChange(Application application) { List<JobType> jobs = productionStepsOf(application).isEmpty() ? jobsOf(testStepsOf(application)) : jobsOf(productionStepsOf(application)); Change change = application.change(); if (jobs.stream().allMatch(job -> completedAt(application.change().withoutApplication(), application, job).isPresent())) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> completedAt(application.change().withoutPlatform(), application, job).isPresent())) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : jobsOf(testStepsOf(application))) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest); if ( ! 
completion.isPresent() && jobStateIsAmong(application, jobType, idle)) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private List<JobType> jobsOf(Collection<Step> steps) { return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList()); } private List<Step> testStepsOf(Application application) { return application.deploymentSpec().steps().isEmpty() ? singletonList(new DeploymentSpec.DeclaredZone(test)) : application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(test) || step.deploysTo(staging)) .collect(toList()); } private List<Step> productionStepsOf(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(prod) || step.zones().isEmpty()) .collect(toList()); } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError) .filter(JobError.outOfCapacity::equals).isPresent(); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication, versions.sourcePlatform, versions.sourceApplication, reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private Versions versions(Application application, Change change, Optional<Deployment> deployment) { return new Versions(targetPlatform(application, change, deployment), targetApplication(application, change, deployment), deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion)); } private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::version), change.platform()) 
.orElse(application.oldestDeployedPlatform() .orElse(controller.systemVersion())); } private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::applicationVersion), change.application()) .orElse(application.oldestDeployedApplication() .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application())); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } private static class Versions { private final Version targetPlatform; private final ApplicationVersion targetApplication; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) { this.targetPlatform = targetPlatform; this.targetApplication = targetApplication; this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; } @Override public String toString() { return String.format("platform %s%s, application %s%s", sourcePlatform.filter(source -> ! 
source.equals(targetPlatform)) .map(source -> source + " -> ").orElse(""), targetPlatform, sourceApplication.filter(source -> ! source.equals(targetApplication)) .map(source -> source.id() + " -> ").orElse(""), targetApplication.id()); } } }
/**
 * Maintains the deployment-job state of applications and decides which jobs to trigger,
 * based on completion reports from the build system and on the job states reported by
 * the {@link BuildService}.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final DeploymentOrder order;
    private final BuildService buildService;

    public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
        Objects.requireNonNull(controller, "controller cannot be null");
        Objects.requireNonNull(curator, "curator cannot be null");
        Objects.requireNonNull(clock, "clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.order = new DeploymentOrder(controller::system);
        this.buildService = buildService;
    }

    /** Returns the deployment order used by this. */
    public DeploymentOrder deploymentOrder() { return order; }

    /**
     * Records information when a job completes (successfully or not). This information is used when deciding what to
     * trigger next.
     */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).",
                                             report.jobError().map(e -> e.toString() + " error")
                                                   .orElse("success"),
                                             report.jobType(), report.applicationId(), report.projectId()));
        if ( ! applications().get(report.applicationId()).isPresent()) {
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }

        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                // A component (build) job completion carries a new application version.
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
                triggering = JobRun.triggering(controller.systemVersion(), applicationVersion,
                                               Optional.empty(), Optional.empty(), "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application))
                        application = application.withChange(application.change().with(applicationVersion))
                                                 .withOutstandingChange(Change.empty());
                    else
                        // Park the new version as outstanding until the current change completes.
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered)
                                        .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " +
                                                                                     report.applicationId() + ", but that has neither been triggered nor deployed"));
            }
            application = application.withJobCompletion(report.projectId(), report.jobType(),
                                                        triggering.completion(report.buildNumber(), clock.instant()),
                                                        report.jobError());
            application = application.withChange(remainingChange(application));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /**
     * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
     *
     * Only one job is triggered each run for test jobs, since their environments have limited capacity.
     */
    public long triggerReadyJobs() {
        return computeReadyJobs().stream()
                                 // Split into test jobs (capacity-limited) and production jobs.
                                 .collect(partitioningBy(job -> job.jobType().isTest()))
                                 .entrySet().stream()
                                 .flatMap(entry -> (entry.getKey()
                                         // Test jobs: prioritise retries and application upgrades, then FIFO by availability.
                                         ? entry.getValue().stream()
                                                .sorted(comparing(Job::isRetry)
                                                                .thenComparing(Job::applicationUpgrade)
                                                                .reversed()
                                                                .thenComparing(Job::availableSince))
                                                .collect(groupingBy(Job::jobType))
                                         : entry.getValue().stream()
                                                .collect(groupingBy(Job::applicationId)))
                                         .values().stream()
                                         // Trigger at most one test job per job type; no limit for production jobs.
                                         .map(jobs -> (Supplier<Long>) jobs.stream()
                                                                           .filter(this::trigger)
                                                                           .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                                 .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /**
     * Attempts to trigger the given job for the given application and returns the outcome.
     *
     * If the build service can not find the given job, or claims it is illegal to trigger it,
     * the project id is removed from the application owning the job, to prevent further trigger attempts.
     */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            buildService.trigger(job);
            applications().lockOrThrow(job.applicationId(), application ->
                    applications().store(application.withJobTriggering(job.jobType, job.triggering)));
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
                applications().lockOrThrow(job.applicationId(), application ->
                        applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            buildService.trigger(BuildJob.of(applicationId,
                                             application.deploymentJobs().projectId().getAsLong(),
                                             jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = versions(application, application.change(), deploymentFor(application, jobType));
        String reason = "Job triggered manually by " + user;
        // An untested production job triggers the required test jobs instead of itself.
        return (jobType.isProduction() && ! isTested(application, versions)
                ? testJobs(application, versions, reason, clock.instant()).stream()
                : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant())))
                .peek(this::trigger)
                .map(Job::jobType).collect(toList());
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.change() + " is already in progress");
            application = application.withChange(change);
            if (change.application().isPresent())
                application = application.withOutstandingChange(Change.empty());
            applications().store(application);
        });
    }

    /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            applications().store(application.withChange(application.change().application()
                                                                   .filter(__ -> keepApplicationChange)
                                                                   .map(Change::of)
                                                                   .orElse(Change.empty())));
        });
    }

    /** Returns the application controller of this' controller. */
    private ApplicationController applications() { return controller.applications(); }

    /** Returns the last successful run of the given job type whose versions match the given versions, if any. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(run -> targetsMatch(versions, run));
    }

    /** Returns the deployment of the given application in the zone of the given job type, if any. */
    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
    }

    /** Returns the greater of the two given values, or whichever one is present, if any. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .notPullRequest()
                              .withProjectId()
                              .deploying()
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    /** Returns the production jobs of the given application which are currently running. */
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    private boolean isRunning(Application application, JobType jobType) {
        return ! application.deploymentJobs().statusOf(jobType)
                            .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                            .orElse(false)
               && EnumSet.of(running, queued).contains(jobStateOf(application, jobType));
    }

    /** Asks the build service for the current state of the given job. */
    private JobState jobStateOf(Application application, JobType jobType) {
        return buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
    }

    /**
     * Returns whether the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    private boolean isComplete(Change change, Application application, JobType jobType) {
        Optional<Deployment> existingDeployment = deploymentFor(application, jobType);
        return successOn(application, jobType, versions(application, change, existingDeployment)).isPresent()
               ||    jobType.isProduction()
                  && existingDeployment.map(deployment ->    ! isUpgrade(change, deployment)
                                                          && isDowngrade(application.change(), deployment))
                                       .orElse(false);
    }

    /** Returns whether the given change upgrades either the platform or the application of the given deployment. */
    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    /** Returns whether the given change downgrades either the platform or the application of the given deployment. */
    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    /** Returns whether the given versions are considered tested, either by successful test runs or by an already triggered production job. */
    private boolean isTested(Application application, Versions versions) {
        return testedAt(application, versions).isPresent()
               || alreadyTriggered(application, versions);
    }

    /** Returns the instant at which both system and staging test succeeded on the given versions, if both did. */
    private Optional<Instant> testedAt(Application application, Versions versions) {
        Optional<JobRun> testRun = successOn(application, systemTest, versions);
        Optional<JobRun> stagingRun = successOn(application, stagingTest, versions)
                .filter(run -> sourcesMatchIfPresent(versions, run));
        return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at))
                // Both runs must be present for the versions to count as tested.
                .filter(__ -> testRun.isPresent() && stagingRun.isPresent());
    }

    /** Returns whether any production job has already been triggered on the given versions. */
    private boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(run -> targetsMatch(versions, run))
                                              .filter(run -> sourcesMatchIfPresent(versions, run))
                                              .isPresent());
    }

    /** If the given state's sources are present and differ from its targets, returns whether they are equal to those
     *  of the given job run. */
    private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) {
        return    (   ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent()
                   || versions.sourcePlatform.equals(jobRun.sourcePlatform()))
               && (   ! versions.sourceApplication.filter(version -> ! version.equals(versions.targetApplication)).isPresent()
                   || versions.sourceApplication.equals(jobRun.sourceApplication()));
    }

    /** Returns whether the target versions of the given state equal the versions of the given job run. */
    private static boolean targetsMatch(Versions versions, JobRun jobRun) {
        return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application());
    }

    /** Returns whether a new application version should become the current change, rather than be parked as outstanding. */
    private boolean acceptNewApplicationVersion(Application application) {
        if (application.change().application().isPresent()) return true;
        if (application.deploymentJobs().hasFailures()) return true;
        return ! application.changeAt(clock.instant()).platform().isPresent();
    }

    /** Returns the part of the application's current change which is not yet complete on its jobs. */
    private Change remainingChange(Application application) {
        List<JobType> jobs = productionStepsOf(application).isEmpty()
                ? jobsOf(testStepsOf(application))
                : jobsOf(productionStepsOf(application));

        Change change = application.change();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job)))
            change = change.withoutPlatform();

        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job)))
            change = change.withoutApplication();

        return change;
    }

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : jobsOf(testStepsOf(application))) {
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    // System test does not need to match sources; staging test does.
                    .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest);
            if ( ! completion.isPresent() && jobStateOf(application, jobType) == idle)
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
        }
        return jobs;
    }

    /** Returns the jobs of all zones of the given steps. */
    private List<JobType> jobsOf(Collection<Step> steps) {
        return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList());
    }

    /** Returns the test steps of the given application's deployment spec, defaulting to a test zone when the spec is empty. */
    private List<Step> testStepsOf(Application application) {
        return application.deploymentSpec().steps().isEmpty()
                ? singletonList(new DeploymentSpec.DeclaredZone(test))
                : application.deploymentSpec().steps().stream()
                             .filter(step -> step.deploysTo(test) || step.deploysTo(staging))
                             .collect(toList());
    }

    /** Returns the production steps (including zone-less steps, i.e., delays) of the given application's deployment spec. */
    private List<Step> productionStepsOf(Application application) {
        return application.deploymentSpec().steps().stream()
                          .filter(step -> step.deploysTo(prod) || step.zones().isEmpty())
                          .collect(toList());
    }

    /** Creates a Job for the given job type, retrying out-of-capacity failures and recording why the job was triggered. */
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
                                     .filter(JobError.outOfCapacity::equals).isPresent();
        if (isRetry) reason += "; retrying on out of capacity";

        JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication,
                                              versions.sourcePlatform, versions.sourceApplication,
                                              reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    /** Computes the target and source versions for the given change and deployment. */
    private Versions versions(Application application, Change change, Optional<Deployment> deployment) {
        return new Versions(targetPlatform(application, change, deployment),
                            targetApplication(application, change, deployment),
                            deployment.map(Deployment::version),
                            deployment.map(Deployment::applicationVersion));
    }

    /** Returns the platform version to target: the newer of the deployed and changed versions, falling back on deployed or system versions. */
    private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
        return max(deployment.map(Deployment::version), change.platform())
                .orElse(application.oldestDeployedPlatform()
                                   .orElse(controller.systemVersion()));
    }

    /** Returns the application version to target: the newer of the deployed and changed versions, falling back on deployed or last built versions. */
    private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
        return max(deployment.map(Deployment::applicationVersion), change.application())
                .orElse(application.oldestDeployedApplication()
                                   .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application()));
    }

    /** A job to trigger, with the run it represents and scheduling metadata. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince;
        private final boolean isRetry;
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

    /** Target and (optional) source platform and application versions of a job run. */
    private static class Versions {

        private final Version targetPlatform;
        private final ApplicationVersion targetApplication;
        private final Optional<Version> sourcePlatform;
        private final Optional<ApplicationVersion> sourceApplication;

        private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform,
                         Optional<ApplicationVersion> sourceApplication) {
            this.targetPlatform = targetPlatform;
            this.targetApplication = targetApplication;
            this.sourcePlatform = sourcePlatform;
            this.sourceApplication = sourceApplication;
        }

        @Override
        public String toString() {
            return String.format("platform %s%s, application %s%s",
                                 sourcePlatform.filter(source -> ! source.equals(targetPlatform))
                                               .map(source -> source + " -> ").orElse(""),
                                 targetPlatform,
                                 sourceApplication.filter(source -> ! source.equals(targetApplication))
                                                  .map(source -> source.id() + " -> ").orElse(""),
                                 targetApplication.id());
        }

    }

}
OK, I tried it out. I find it hinders readability, because the logic is hidden. It's also used just this once, so there isn't any code-reuse benefit to it. Also, the argument list becomes quite large.
/**
 * Finds the jobs that are ready to run for the given application: production jobs whose change
 * is not yet complete and whose upstream steps are done, plus any test jobs those require first.
 */
private List<Job> computeReadyJobs(ApplicationId id) {
    List<Job> jobs = new ArrayList<>();
    applications().get(id).ifPresent(application -> {
        Change change = application.changeAt(clock.instant());
        // Latest completion of the system and staging tests, if any; updated step by step below
        // to the point at which the next step's jobs became available.
        Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                            application.deploymentJobs().statusOf(stagingTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
        String reason = "New change available";
        // null: no test jobs computed yet; emptyList(): tested versions found, so none are needed.
        List<Job> testJobs = null;

        if (change.isPresent())
            for (Step step : productionStepsOf(application)) {
                Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
                // Group this step's jobs by when the change completed on them; Optional.empty() means not complete.
                Map<Optional<Instant>, List<JobType>> jobsByCompletion =
                        stepJobs.stream().collect(groupingBy(job -> completedAt(change, application, job)));
                if (jobsByCompletion.containsKey(Optional.empty())) { // Step incomplete: trigger the remaining jobs.
                    for (JobType job : jobsByCompletion.get(Optional.empty())) {
                        Versions versions = versions(application, change, deploymentFor(application, job));
                        if (isTested(application, versions)) {
                            if (   completedAt.isPresent()
                                && jobStateIsAmong(application, job, idle)
                                // NOTE(review): presumably ensures no jobs of earlier steps still run — confirm.
                                && stepJobs.containsAll(runningProductionJobs(application)))
                                jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                            if ( ! alreadyTriggered(application, versions))
                                testJobs = emptyList();
                        }
                        else if (testJobs == null) {
                            testJobs = testJobs(application, versions,
                                                String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()),
                                                completedAt.orElse(clock.instant()));
                        }
                    }
                    completedAt = Optional.empty(); // Steps after an incomplete step are not yet ready.
                }
                else { // All jobs of this step are complete: compute when the next step became available.
                    if (stepJobs.isEmpty()) { // A delay step shifts availability by its duration.
                        Duration delay = ((DeploymentSpec.Delay) step).duration();
                        // Discard availability if the delay has not yet passed.
                        completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                        reason += " after a delay of " + delay;
                    }
                    else {
                        completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
                        reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                    }
                }
            }
        if (testJobs == null)
            testJobs = testJobs(application, versions(application, application.change(), Optional.empty()),
                                "Testing last changes outside prod", clock.instant());
        jobs.addAll(testJobs);
    });
    return jobs;
}
&& jobStateIsAmong(application, job, idle)
/**
 * Finds the jobs that are ready to run for the given application: production jobs whose change
 * is not yet complete and whose upstream steps are done, plus any test jobs those require first.
 */
private List<Job> computeReadyJobs(ApplicationId id) {
    List<Job> jobs = new ArrayList<>();
    applications().get(id).ifPresent(application -> {
        Change change = application.changeAt(clock.instant());
        // Latest completion of the system and staging tests, if any; updated step by step below
        // to the point at which the next step's jobs became available.
        Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                            application.deploymentJobs().statusOf(stagingTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
        String reason = "New change available";
        // null: no test jobs computed yet; emptyList(): tested versions found, so none are needed.
        List<Job> testJobs = null;

        if (change.isPresent())
            for (Step step : productionStepsOf(application)) {
                Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
                List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! isComplete(change, application, job)).collect(toList());
                if ( ! remainingJobs.isEmpty()) { // Step incomplete: trigger the remaining jobs.
                    for (JobType job : remainingJobs) {
                        Versions versions = versions(application, change, deploymentFor(application, job));
                        if (isTested(application, versions)) {
                            if (   completedAt.isPresent()
                                && jobStateOf(application, job) == idle
                                // NOTE(review): presumably ensures no jobs of earlier steps still run — confirm.
                                && stepJobs.containsAll(runningProductionJobs(application)))
                                jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                            if ( ! alreadyTriggered(application, versions))
                                testJobs = emptyList();
                        }
                        else if (testJobs == null) {
                            testJobs = testJobs(application, versions,
                                                String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()),
                                                completedAt.orElse(clock.instant()));
                        }
                    }
                    completedAt = Optional.empty(); // Steps after an incomplete step are not yet ready.
                }
                else { // All jobs of this step are complete: compute when the next step became available.
                    if (stepJobs.isEmpty()) { // A delay step shifts availability by its duration.
                        Duration delay = ((DeploymentSpec.Delay) step).duration();
                        // Discard availability if the delay has not yet passed.
                        completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                        reason += " after a delay of " + delay;
                    }
                    else {
                        // NOTE(review): assumes every complete step job has a lastCompleted run — the .get()s
                        // would throw if a job were complete via downgrade without ever having run; confirm.
                        completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder());
                        reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                    }
                }
            }
        if (testJobs == null)
            testJobs = testJobs(application, versions(application, application.change(), Optional.empty()),
                                "Testing last changes outside prod", clock.instant());
        jobs.addAll(testJobs);
    });
    return jobs;
}
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentOrder order; private final BuildService buildService; public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.order = new DeploymentOrder(controller::system); this.buildService = buildService; } public DeploymentOrder deploymentOrder() { return order; } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional .empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application)) application = application.withChange(application.change().with(applicationVersion)) .withOutstandingChange(Change.empty()); else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application)); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attemps. */ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { buildService.trigger(job); applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(job.jobType, job.triggering))); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. 
*/ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = versions(application, application.change(), deploymentFor(application, jobType)); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty()))); }); } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(run -> targetsMatch(versions, run)); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get())); } private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! 
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && jobStateIsAmong(application, jobType, running, queued); } private boolean jobStateIsAmong(Application application, JobType jobType, JobState... states) { return Arrays.asList(states).contains(buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()))); } /** * Returns the instant when the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ private Optional<Instant> completedAt(Change change, Application application, JobType jobType) { Versions versions = versions(application, change, deploymentFor(application, jobType)); Optional<JobRun> lastSuccess = successOn(application, jobType, versions); if (lastSuccess.isPresent() || ! jobType.isProduction()) return lastSuccess.map(JobRun::at); return deploymentFor(application, jobType) .filter(deployment -> ! 
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .map(Deployment::at); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions); } private Optional<Instant> testedAt(Application application, Versions versions) { Optional<JobRun> testRun = successOn(application, systemTest, versions); Optional<JobRun> stagingRun = successOn(application, stagingTest, versions) .filter(run -> sourcesMatchIfPresent(versions, run)); return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at)) .filter(__ -> testRun.isPresent() && stagingRun.isPresent()); } private boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(run -> targetsMatch(versions, run)) .filter(run -> sourcesMatchIfPresent(versions, run)) .isPresent()); } /** If the given state's sources are present and differ from its targets, returns whether they are equal to those * of the given job run. */ private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) { return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent() || versions.sourcePlatform.equals(jobRun.sourcePlatform())) && ( ! versions.sourceApplication.filter(version -> ! 
version.equals(versions.targetApplication)).isPresent() || versions.sourceApplication.equals(jobRun.sourceApplication())); } private static boolean targetsMatch(Versions versions, JobRun jobRun) { return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application()); } private boolean acceptNewApplicationVersion(Application application) { if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.changeAt(clock.instant()).platform().isPresent(); } private Change remainingChange(Application application) { List<JobType> jobs = productionStepsOf(application).isEmpty() ? jobsOf(testStepsOf(application)) : jobsOf(productionStepsOf(application)); Change change = application.change(); if (jobs.stream().allMatch(job -> completedAt(application.change().withoutApplication(), application, job).isPresent())) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> completedAt(application.change().withoutPlatform(), application, job).isPresent())) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : jobsOf(testStepsOf(application))) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest); if ( ! 
completion.isPresent() && jobStateIsAmong(application, jobType, idle)) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private List<JobType> jobsOf(Collection<Step> steps) { return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList()); } private List<Step> testStepsOf(Application application) { return application.deploymentSpec().steps().isEmpty() ? singletonList(new DeploymentSpec.DeclaredZone(test)) : application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(test) || step.deploysTo(staging)) .collect(toList()); } private List<Step> productionStepsOf(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(prod) || step.zones().isEmpty()) .collect(toList()); } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError) .filter(JobError.outOfCapacity::equals).isPresent(); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication, versions.sourcePlatform, versions.sourceApplication, reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private Versions versions(Application application, Change change, Optional<Deployment> deployment) { return new Versions(targetPlatform(application, change, deployment), targetApplication(application, change, deployment), deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion)); } private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::version), change.platform()) 
.orElse(application.oldestDeployedPlatform() .orElse(controller.systemVersion())); } private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::applicationVersion), change.application()) .orElse(application.oldestDeployedApplication() .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application())); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } private static class Versions { private final Version targetPlatform; private final ApplicationVersion targetApplication; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) { this.targetPlatform = targetPlatform; this.targetApplication = targetApplication; this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; } @Override public String toString() { return String.format("platform %s%s, application %s%s", sourcePlatform.filter(source -> ! 
source.equals(targetPlatform)) .map(source -> source + " -> ").orElse(""), targetPlatform, sourceApplication.filter(source -> ! source.equals(targetApplication)) .map(source -> source.id() + " -> ").orElse(""), targetApplication.id()); } } }
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentOrder order; private final BuildService buildService; public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.order = new DeploymentOrder(controller::system); this.buildService = buildService; } public DeploymentOrder deploymentOrder() { return order; } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional .empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application)) application = application.withChange(application.change().with(applicationVersion)) .withOutstandingChange(Change.empty()); else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application)); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attemps. */ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { buildService.trigger(job); applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(job.jobType, job.triggering))); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. 
*/ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = versions(application, application.change(), deploymentFor(application, jobType)); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty()))); }); } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(run -> targetsMatch(versions, run)); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get())); } private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! 
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && EnumSet.of(running, queued).contains(jobStateOf(application, jobType)); } private JobState jobStateOf(Application application, JobType jobType) { return buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); } /** * Returns whether the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ private boolean isComplete(Change change, Application application, JobType jobType) { Optional<Deployment> existingDeployment = deploymentFor(application, jobType); return successOn(application, jobType, versions(application, change, existingDeployment)).isPresent() || jobType.isProduction() && existingDeployment.map(deployment -> ! 
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .orElse(false); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions); } private Optional<Instant> testedAt(Application application, Versions versions) { Optional<JobRun> testRun = successOn(application, systemTest, versions); Optional<JobRun> stagingRun = successOn(application, stagingTest, versions) .filter(run -> sourcesMatchIfPresent(versions, run)); return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at)) .filter(__ -> testRun.isPresent() && stagingRun.isPresent()); } private boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(run -> targetsMatch(versions, run)) .filter(run -> sourcesMatchIfPresent(versions, run)) .isPresent()); } /** If the given state's sources are present and differ from its targets, returns whether they are equal to those * of the given job run. */ private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) { return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent() || versions.sourcePlatform.equals(jobRun.sourcePlatform())) && ( ! versions.sourceApplication.filter(version -> ! 
version.equals(versions.targetApplication)).isPresent() || versions.sourceApplication.equals(jobRun.sourceApplication())); } private static boolean targetsMatch(Versions versions, JobRun jobRun) { return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application()); } private boolean acceptNewApplicationVersion(Application application) { if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.changeAt(clock.instant()).platform().isPresent(); } private Change remainingChange(Application application) { List<JobType> jobs = productionStepsOf(application).isEmpty() ? jobsOf(testStepsOf(application)) : jobsOf(productionStepsOf(application)); Change change = application.change(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job))) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job))) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : jobsOf(testStepsOf(application))) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest); if ( ! 
completion.isPresent() && jobStateOf(application, jobType) == idle) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private List<JobType> jobsOf(Collection<Step> steps) { return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList()); } private List<Step> testStepsOf(Application application) { return application.deploymentSpec().steps().isEmpty() ? singletonList(new DeploymentSpec.DeclaredZone(test)) : application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(test) || step.deploysTo(staging)) .collect(toList()); } private List<Step> productionStepsOf(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(prod) || step.zones().isEmpty()) .collect(toList()); } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError) .filter(JobError.outOfCapacity::equals).isPresent(); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication, versions.sourcePlatform, versions.sourceApplication, reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private Versions versions(Application application, Change change, Optional<Deployment> deployment) { return new Versions(targetPlatform(application, change, deployment), targetApplication(application, change, deployment), deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion)); } private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::version), change.platform()) 
.orElse(application.oldestDeployedPlatform() .orElse(controller.systemVersion())); } private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::applicationVersion), change.application()) .orElse(application.oldestDeployedApplication() .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application())); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } private static class Versions { private final Version targetPlatform; private final ApplicationVersion targetApplication; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) { this.targetPlatform = targetPlatform; this.targetApplication = targetApplication; this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; } @Override public String toString() { return String.format("platform %s%s, application %s%s", sourcePlatform.filter(source -> ! 
source.equals(targetPlatform)) .map(source -> source + " -> ").orElse(""), targetPlatform, sourceApplication.filter(source -> ! source.equals(targetApplication)) .map(source -> source.id() + " -> ").orElse(""), targetApplication.id()); } } }
I think the problem here is that the code _is_ hard to read, but it's _not_ because of the type of the keys in this map. If you take a step back, I think you'll agree with that. Mixing in magic-value `Instant`s doesn't really help -- I think it makes it even worse.
/**
 * Computes the list of jobs which should be triggered now for the given application.
 *
 * Walks the application's production steps in declared order, carrying forward the instant
 * at which the previous step completed ({@code completedAt}); a production job is added only
 * when its change is tested, the upstream step has completed, the job is idle, and no
 * production jobs outside the current step are still running. Test jobs are collected for
 * untested versions, and a fallback test run is added when nothing else scheduled tests.
 *
 * @param id the application to compute ready jobs for; silently yields no jobs if unknown
 * @return the jobs (production and/or test) that are ready to be triggered
 */
private List<Job> computeReadyJobs(ApplicationId id) {
    List<Job> jobs = new ArrayList<>();
    applications().get(id).ifPresent(application -> {
        Change change = application.changeAt(clock.instant());
        // Seed "upstream completed at" with the later of the last systemTest / stagingTest successes.
        // NOTE(review): max() of two Optionals — presumably intended as "latest known test success";
        // if only one is present, that one is used. Confirm that is the desired gating semantics.
        Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                            application.deploymentJobs().statusOf(stagingTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
        String reason = "New change available";
        List<Job> testJobs = null; // null means "still to be decided"; empty list means "no test jobs needed"
        if (change.isPresent())
            for (Step step : productionStepsOf(application)) {
                Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
                // Group this step's jobs by the instant the change completed for them;
                // a key of Optional.empty() marks jobs for which the change is not yet complete.
                Map<Optional<Instant>, List<JobType>> jobsByCompletion =
                        stepJobs.stream().collect(groupingBy(job -> completedAt(change, application, job)));
                if (jobsByCompletion.containsKey(Optional.empty())) { // Step incomplete because of some jobs.
                    for (JobType job : jobsByCompletion.get(Optional.empty())) {
                        Versions versions = versions(application, change, deploymentFor(application, job));
                        if (isTested(application, versions)) {
                            // Trigger the production job only when the upstream step is done, the job is
                            // idle, and no production jobs outside this step are currently running.
                            if (   completedAt.isPresent()
                                && jobStateIsAmong(application, job, idle)
                                && stepJobs.containsAll(runningProductionJobs(application)))
                                jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                            // Tests already successful for these versions; suppress further test scheduling
                            // unless a production job for them was never even triggered.
                            if ( ! alreadyTriggered(application, versions))
                                testJobs = emptyList();
                        }
                        else if (testJobs == null) {
                            // Versions untested: schedule the test jobs required before this deployment.
                            testJobs = testJobs(application, versions,
                                                String.format("Testing deployment for %s (%s)",
                                                              job.jobName(), versions.toString()),
                                                completedAt.orElse(clock.instant()));
                        }
                    }
                    completedAt = Optional.empty(); // This step is incomplete, so later steps must wait.
                }
                else { // Step is complete; advance completedAt past it.
                    if (stepJobs.isEmpty()) { // A delay step: push completion time forward by the delay,
                                              // and clear it if the delay has not yet elapsed.
                        Duration delay = ((DeploymentSpec.Delay) step).duration();
                        completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                        reason += " after a delay of " + delay;
                    }
                    else {
                        // All keys are present here (the empty key was handled above), so get() is safe.
                        completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
                        reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                    }
                }
            }
        if (testJobs == null) // No production step decided on tests: test the latest changes outside prod.
            testJobs = testJobs(application, versions(application, application.change(), Optional.empty()),
                                "Testing last changes outside prod", clock.instant());
        jobs.addAll(testJobs);
    });
    return jobs;
}
if (jobsByCompletion.containsKey(Optional.empty())) {
/**
 * Computes the list of jobs which should be triggered now for the given application.
 *
 * Walks the application's production steps in declared order, carrying forward the instant
 * at which the previous step completed ({@code completedAt}); a production job is added only
 * when its change is tested, the upstream step has completed, the job is idle, and no
 * production jobs outside the current step are still running. Test jobs are collected for
 * untested versions, and a fallback test run is added when nothing else scheduled tests.
 *
 * @param id the application to compute ready jobs for; silently yields no jobs if unknown
 * @return the jobs (production and/or test) that are ready to be triggered
 */
private List<Job> computeReadyJobs(ApplicationId id) {
    List<Job> jobs = new ArrayList<>();
    applications().get(id).ifPresent(application -> {
        Change change = application.changeAt(clock.instant());
        // Seed "upstream completed at" with the later of the last systemTest / stagingTest successes.
        // NOTE(review): max() of two Optionals — presumably intended as "latest known test success";
        // if only one is present, that one is used. Confirm that is the desired gating semantics.
        Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                            application.deploymentJobs().statusOf(stagingTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
        String reason = "New change available";
        List<Job> testJobs = null; // null means "still to be decided"; empty list means "no test jobs needed"
        if (change.isPresent())
            for (Step step : productionStepsOf(application)) {
                Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
                // The jobs of this step for which the change has not yet completed.
                List<JobType> remainingJobs =
                        stepJobs.stream().filter(job -> ! isComplete(change, application, job)).collect(toList());
                if ( ! remainingJobs.isEmpty()) { // Step incomplete because of some jobs.
                    for (JobType job : remainingJobs) {
                        Versions versions = versions(application, change, deploymentFor(application, job));
                        if (isTested(application, versions)) {
                            // Trigger the production job only when the upstream step is done, the job is
                            // idle, and no production jobs outside this step are currently running.
                            if (   completedAt.isPresent()
                                && jobStateOf(application, job) == idle
                                && stepJobs.containsAll(runningProductionJobs(application)))
                                jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                            // Tests already successful for these versions; suppress further test scheduling
                            // unless a production job for them was never even triggered.
                            if ( ! alreadyTriggered(application, versions))
                                testJobs = emptyList();
                        }
                        else if (testJobs == null) {
                            // Versions untested: schedule the test jobs required before this deployment.
                            testJobs = testJobs(application, versions,
                                                String.format("Testing deployment for %s (%s)",
                                                              job.jobName(), versions.toString()),
                                                completedAt.orElse(clock.instant()));
                        }
                    }
                    completedAt = Optional.empty(); // This step is incomplete, so later steps must wait.
                }
                else { // Step is complete; advance completedAt past it.
                    if (stepJobs.isEmpty()) { // A delay step: push completion time forward by the delay,
                                              // and clear it if the delay has not yet elapsed.
                        Duration delay = ((DeploymentSpec.Delay) step).duration();
                        completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                        reason += " after a delay of " + delay;
                    }
                    else {
                        // NOTE(review): assumes every job in a completed step has a status with a
                        // lastCompleted run — get() would throw otherwise; verify isComplete guarantees this.
                        completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder());
                        reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                    }
                }
            }
        if (testJobs == null) // No production step decided on tests: test the latest changes outside prod.
            testJobs = testJobs(application, versions(application, application.change(), Optional.empty()),
                                "Testing last changes outside prod", clock.instant());
        jobs.addAll(testJobs);
    });
    return jobs;
}
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentOrder order; private final BuildService buildService; public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.order = new DeploymentOrder(controller::system); this.buildService = buildService; } public DeploymentOrder deploymentOrder() { return order; } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional .empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application)) application = application.withChange(application.change().with(applicationVersion)) .withOutstandingChange(Change.empty()); else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application)); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attemps. */ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { buildService.trigger(job); applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(job.jobType, job.triggering))); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. 
*/ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = versions(application, application.change(), deploymentFor(application, jobType)); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty()))); }); } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(run -> targetsMatch(versions, run)); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get())); } private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! 
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && jobStateIsAmong(application, jobType, running, queued); } private boolean jobStateIsAmong(Application application, JobType jobType, JobState... states) { return Arrays.asList(states).contains(buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()))); } /** * Returns the instant when the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ private Optional<Instant> completedAt(Change change, Application application, JobType jobType) { Versions versions = versions(application, change, deploymentFor(application, jobType)); Optional<JobRun> lastSuccess = successOn(application, jobType, versions); if (lastSuccess.isPresent() || ! jobType.isProduction()) return lastSuccess.map(JobRun::at); return deploymentFor(application, jobType) .filter(deployment -> ! 
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .map(Deployment::at); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions); } private Optional<Instant> testedAt(Application application, Versions versions) { Optional<JobRun> testRun = successOn(application, systemTest, versions); Optional<JobRun> stagingRun = successOn(application, stagingTest, versions) .filter(run -> sourcesMatchIfPresent(versions, run)); return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at)) .filter(__ -> testRun.isPresent() && stagingRun.isPresent()); } private boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(run -> targetsMatch(versions, run)) .filter(run -> sourcesMatchIfPresent(versions, run)) .isPresent()); } /** If the given state's sources are present and differ from its targets, returns whether they are equal to those * of the given job run. */ private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) { return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent() || versions.sourcePlatform.equals(jobRun.sourcePlatform())) && ( ! versions.sourceApplication.filter(version -> ! 
version.equals(versions.targetApplication)).isPresent() || versions.sourceApplication.equals(jobRun.sourceApplication())); } private static boolean targetsMatch(Versions versions, JobRun jobRun) { return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application()); } private boolean acceptNewApplicationVersion(Application application) { if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.changeAt(clock.instant()).platform().isPresent(); } private Change remainingChange(Application application) { List<JobType> jobs = productionStepsOf(application).isEmpty() ? jobsOf(testStepsOf(application)) : jobsOf(productionStepsOf(application)); Change change = application.change(); if (jobs.stream().allMatch(job -> completedAt(application.change().withoutApplication(), application, job).isPresent())) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> completedAt(application.change().withoutPlatform(), application, job).isPresent())) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : jobsOf(testStepsOf(application))) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest); if ( ! 
completion.isPresent() && jobStateIsAmong(application, jobType, idle)) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private List<JobType> jobsOf(Collection<Step> steps) { return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList()); } private List<Step> testStepsOf(Application application) { return application.deploymentSpec().steps().isEmpty() ? singletonList(new DeploymentSpec.DeclaredZone(test)) : application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(test) || step.deploysTo(staging)) .collect(toList()); } private List<Step> productionStepsOf(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(prod) || step.zones().isEmpty()) .collect(toList()); } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError) .filter(JobError.outOfCapacity::equals).isPresent(); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication, versions.sourcePlatform, versions.sourceApplication, reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private Versions versions(Application application, Change change, Optional<Deployment> deployment) { return new Versions(targetPlatform(application, change, deployment), targetApplication(application, change, deployment), deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion)); } private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::version), change.platform()) 
.orElse(application.oldestDeployedPlatform() .orElse(controller.systemVersion())); } private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::applicationVersion), change.application()) .orElse(application.oldestDeployedApplication() .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application())); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } private static class Versions { private final Version targetPlatform; private final ApplicationVersion targetApplication; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) { this.targetPlatform = targetPlatform; this.targetApplication = targetApplication; this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; } @Override public String toString() { return String.format("platform %s%s, application %s%s", sourcePlatform.filter(source -> ! 
source.equals(targetPlatform)) .map(source -> source + " -> ").orElse(""), targetPlatform, sourceApplication.filter(source -> ! source.equals(targetApplication)) .map(source -> source.id() + " -> ").orElse(""), targetApplication.id()); } } }
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentOrder order; private final BuildService buildService; public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.order = new DeploymentOrder(controller::system); this.buildService = buildService; } public DeploymentOrder deploymentOrder() { return order; } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional .empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application)) application = application.withChange(application.change().with(applicationVersion)) .withOutstandingChange(Change.empty()); else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application)); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attemps. */ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { buildService.trigger(job); applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(job.jobType, job.triggering))); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. 
*/ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = versions(application, application.change(), deploymentFor(application, jobType)); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty()))); }); } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(run -> targetsMatch(versions, run)); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get())); } private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! 
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && EnumSet.of(running, queued).contains(jobStateOf(application, jobType)); } private JobState jobStateOf(Application application, JobType jobType) { return buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); } /** * Returns whether the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ private boolean isComplete(Change change, Application application, JobType jobType) { Optional<Deployment> existingDeployment = deploymentFor(application, jobType); return successOn(application, jobType, versions(application, change, existingDeployment)).isPresent() || jobType.isProduction() && existingDeployment.map(deployment -> ! 
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .orElse(false); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions); } private Optional<Instant> testedAt(Application application, Versions versions) { Optional<JobRun> testRun = successOn(application, systemTest, versions); Optional<JobRun> stagingRun = successOn(application, stagingTest, versions) .filter(run -> sourcesMatchIfPresent(versions, run)); return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at)) .filter(__ -> testRun.isPresent() && stagingRun.isPresent()); } private boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(run -> targetsMatch(versions, run)) .filter(run -> sourcesMatchIfPresent(versions, run)) .isPresent()); } /** If the given state's sources are present and differ from its targets, returns whether they are equal to those * of the given job run. */ private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) { return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent() || versions.sourcePlatform.equals(jobRun.sourcePlatform())) && ( ! versions.sourceApplication.filter(version -> ! 
version.equals(versions.targetApplication)).isPresent() || versions.sourceApplication.equals(jobRun.sourceApplication())); } private static boolean targetsMatch(Versions versions, JobRun jobRun) { return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application()); } private boolean acceptNewApplicationVersion(Application application) { if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.changeAt(clock.instant()).platform().isPresent(); } private Change remainingChange(Application application) { List<JobType> jobs = productionStepsOf(application).isEmpty() ? jobsOf(testStepsOf(application)) : jobsOf(productionStepsOf(application)); Change change = application.change(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job))) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job))) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : jobsOf(testStepsOf(application))) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest); if ( ! 
completion.isPresent() && jobStateOf(application, jobType) == idle) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private List<JobType> jobsOf(Collection<Step> steps) { return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList()); } private List<Step> testStepsOf(Application application) { return application.deploymentSpec().steps().isEmpty() ? singletonList(new DeploymentSpec.DeclaredZone(test)) : application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(test) || step.deploysTo(staging)) .collect(toList()); } private List<Step> productionStepsOf(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(prod) || step.zones().isEmpty()) .collect(toList()); } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError) .filter(JobError.outOfCapacity::equals).isPresent(); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication, versions.sourcePlatform, versions.sourceApplication, reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private Versions versions(Application application, Change change, Optional<Deployment> deployment) { return new Versions(targetPlatform(application, change, deployment), targetApplication(application, change, deployment), deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion)); } private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::version), change.platform()) 
.orElse(application.oldestDeployedPlatform() .orElse(controller.systemVersion())); } private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::applicationVersion), change.application()) .orElse(application.oldestDeployedApplication() .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application())); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } private static class Versions { private final Version targetPlatform; private final ApplicationVersion targetApplication; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) { this.targetPlatform = targetPlatform; this.targetApplication = targetApplication; this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; } @Override public String toString() { return String.format("platform %s%s, application %s%s", sourcePlatform.filter(source -> ! 
source.equals(targetPlatform)) .map(source -> source + " -> ").orElse(""), targetPlatform, sourceApplication.filter(source -> ! source.equals(targetApplication)) .map(source -> source.id() + " -> ").orElse(""), targetApplication.id()); } } }
Please consider the approach taken in the last commit: `Increase readability at the cost of code duplication`
/**
 * Computes the jobs that are ready to run for the given application: production jobs of the
 * first deployment step whose change is not yet complete, plus any test jobs needed to verify
 * the versions about to be deployed.
 *
 * NOTE(review): this method deliberately mutates {@code completedAt} and {@code reason} across
 * step iterations to gate each step on completion of the previous one — preserve statement
 * order if modifying.
 */
private List<Job> computeReadyJobs(ApplicationId id) {
    List<Job> jobs = new ArrayList<>();
    applications().get(id).ifPresent(application -> {
        Change change = application.changeAt(clock.instant());
        // The later of the last-success instants of system test and staging test, if any
        // (max() yields the present one when only one is present).
        Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                            application.deploymentJobs().statusOf(stagingTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
        String reason = "New change available";
        List<Job> testJobs = null; // null means "no test jobs decided yet"; emptyList() means "none needed".
        if (change.isPresent())
            for (Step step : productionStepsOf(application)) {
                Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
                // Group this step's jobs by the (optional) instant at which the change completed for them;
                // the Optional.empty() key holds the jobs for which the change is not yet complete.
                Map<Optional<Instant>, List<JobType>> jobsByCompletion =
                        stepJobs.stream().collect(groupingBy(job -> completedAt(change, application, job)));
                if (jobsByCompletion.containsKey(Optional.empty())) { // Step not complete — try to trigger its remaining jobs.
                    for (JobType job : jobsByCompletion.get(Optional.empty())) {
                        Versions versions = versions(application, change, deploymentFor(application, job));
                        if (isTested(application, versions)) {
                            // Trigger only when the previous step is done, the job is idle, and no
                            // production jobs outside this step are still running.
                            if (   completedAt.isPresent()
                                && jobStateIsAmong(application, job, idle)
                                && stepJobs.containsAll(runningProductionJobs(application)))
                                jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                            if ( ! alreadyTriggered(application, versions))
                                testJobs = emptyList();
                        }
                        else if (testJobs == null) {
                            // Versions not yet verified — schedule the verifying test jobs instead.
                            testJobs = testJobs(application, versions,
                                               String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()),
                                               completedAt.orElse(clock.instant()));
                        }
                    }
                    completedAt = Optional.empty(); // Later steps must wait for this one.
                }
                else { // All of this step's jobs are complete for the change.
                    if (stepJobs.isEmpty()) { // An empty zone set means this step is a delay step.
                        Duration delay = ((DeploymentSpec.Delay) step).duration();
                        // Shift the gate by the delay; clear it if the delay has not yet passed.
                        completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                        reason += " after a delay of " + delay;
                    }
                    else {
                        // Gate the next step on the latest completion within this step.
                        completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
                        reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                    }
                }
            }
        if (testJobs == null) // No production test jobs were decided — test the latest changes outside prod.
            testJobs = testJobs(application, versions(application, application.change(), Optional.empty()),
                               "Testing last changes outside prod", clock.instant());
        jobs.addAll(testJobs);
    });
    return jobs;
}
if (jobsByCompletion.containsKey(Optional.empty())) {
/**
 * Computes the jobs that are ready to run for the given application: production jobs of the
 * first deployment step with jobs remaining for the change, plus any test jobs needed to
 * verify the versions about to be deployed.
 *
 * NOTE(review): this method deliberately mutates {@code completedAt} and {@code reason} across
 * step iterations to gate each step on completion of the previous one — preserve statement
 * order if modifying.
 */
private List<Job> computeReadyJobs(ApplicationId id) {
    List<Job> jobs = new ArrayList<>();
    applications().get(id).ifPresent(application -> {
        Change change = application.changeAt(clock.instant());
        // The later of the last-success instants of system test and staging test, if any
        // (max() yields the present one when only one is present).
        Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                            application.deploymentJobs().statusOf(stagingTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
        String reason = "New change available";
        List<Job> testJobs = null; // null means "no test jobs decided yet"; emptyList() means "none needed".
        if (change.isPresent())
            for (Step step : productionStepsOf(application)) {
                Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
                // The step's jobs for which the change is not yet complete.
                List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! isComplete(change, application, job)).collect(toList());
                if ( ! remainingJobs.isEmpty()) { // Step not complete — try to trigger its remaining jobs.
                    for (JobType job : remainingJobs) {
                        Versions versions = versions(application, change, deploymentFor(application, job));
                        if (isTested(application, versions)) {
                            // Trigger only when the previous step is done, the job is idle, and no
                            // production jobs outside this step are still running.
                            if (   completedAt.isPresent()
                                && jobStateOf(application, job) == idle
                                && stepJobs.containsAll(runningProductionJobs(application)))
                                jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                            if ( ! alreadyTriggered(application, versions))
                                testJobs = emptyList();
                        }
                        else if (testJobs == null) {
                            // Versions not yet verified — schedule the verifying test jobs instead.
                            testJobs = testJobs(application, versions,
                                               String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()),
                                               completedAt.orElse(clock.instant()));
                        }
                    }
                    completedAt = Optional.empty(); // Later steps must wait for this one.
                }
                else { // All of this step's jobs are complete for the change.
                    if (stepJobs.isEmpty()) { // An empty zone set means this step is a delay step.
                        Duration delay = ((DeploymentSpec.Delay) step).duration();
                        // Shift the gate by the delay; clear it if the delay has not yet passed.
                        completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                        reason += " after a delay of " + delay;
                    }
                    else {
                        // Gate the next step on the latest completion within this step.
                        // NOTE(review): the unchecked get()s assume a status with a completed run exists
                        // for every job deemed complete — confirm isComplete() guarantees this.
                        completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder());
                        reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                    }
                }
            }
        if (testJobs == null) // No production test jobs were decided — test the latest changes outside prod.
            testJobs = testJobs(application, versions(application, application.change(), Optional.empty()),
                               "Testing last changes outside prod", clock.instant());
        jobs.addAll(testJobs);
    });
    return jobs;
}
/**
 * Schedules deployment jobs through a {@link BuildService} and records job completions,
 * keeping each application's current {@code Change} in sync with what has been deployed.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final DeploymentOrder order;
    private final BuildService buildService;

    public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
        Objects.requireNonNull(controller, "controller cannot be null");
        Objects.requireNonNull(curator, "curator cannot be null");
        Objects.requireNonNull(clock, "clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.order = new DeploymentOrder(controller::system);
        this.buildService = buildService;
    }

    public DeploymentOrder deploymentOrder() { return order; }

    /**
     * Records information when a job completes (successfully or not). This information is used when deciding what to
     * trigger next.
     */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).",
                                             report.jobError().map(e -> e.toString() + " error").orElse("success"),
                                             report.jobType(), report.applicationId(), report.projectId()));
        if ( ! applications().get(report.applicationId()).isPresent()) {
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                // A component (build) job defines a new application version; decide whether to roll it out now.
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
                triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional.empty(), Optional.empty(), "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application))
                        application = application.withChange(application.change().with(applicationVersion))
                                                 .withOutstandingChange(Change.empty());
                    else // Park the version as outstanding until the current change is out of the way.
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                // For all other jobs, the completion must correspond to an earlier triggering.
                triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered)
                                        .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " +
                                                                                     report.applicationId() + ", but that has neither been triggered nor deployed"));
            }
            application = application.withJobCompletion(report.projectId(), report.jobType(),
                                                        triggering.completion(report.buildNumber(), clock.instant()),
                                                        report.jobError());
            // Shrink the change by whatever parts are now complete everywhere.
            application = application.withChange(remainingChange(application));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type. */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /**
     * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
     *
     * Only one job is triggered each run for test jobs, since their environments have limited capacity.
     */
    public long triggerReadyJobs() {
        return computeReadyJobs().stream()
                                 // Split into test jobs (true) and production jobs (false).
                                 .collect(partitioningBy(job -> job.jobType().isTest()))
                                 .entrySet().stream()
                                 .flatMap(entry -> (entry.getKey()
                                         // Test jobs: prioritise retries and application upgrades, then oldest first,
                                         // and group by job type so each shared test environment gets one job.
                                         ? entry.getValue().stream()
                                                .sorted(comparing(Job::isRetry)
                                                        .thenComparing(Job::applicationUpgrade)
                                                        .reversed()
                                                        .thenComparing(Job::availableSince))
                                                .collect(groupingBy(Job::jobType))
                                         // Production jobs: group by application; no capacity limit applies.
                                         : entry.getValue().stream()
                                                .collect(groupingBy(Job::applicationId)))
                                         .values().stream()
                                         .map(jobs -> (Supplier<Long>) jobs.stream()
                                                                           .filter(this::trigger)
                                                                           .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                                 .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /**
     * Attempts to trigger the given job for the given application and returns the outcome.
     *
     * If the build service can not find the given job, or claims it is illegal to trigger it,
     * the project id is removed from the application owning the job, to prevent further trigger attempts.
     */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            buildService.trigger(job);
            applications().lockOrThrow(job.applicationId(), application ->
                    applications().store(application.withJobTriggering(job.jobType, job.triggering)));
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
                applications().lockOrThrow(job.applicationId(), application ->
                        applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application, returning the types of the jobs that were triggered. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = versions(application, application.change(), deploymentFor(application, jobType));
        String reason = "Job triggered manually by " + user;
        // An untested production deployment is replaced by the test jobs which must first succeed.
        return (jobType.isProduction() && ! isTested(application, versions)
                ? testJobs(application, versions, reason, clock.instant()).stream()
                : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant())))
                .peek(this::trigger)
                .map(Job::jobType).collect(toList());
    }

    /**
     * Triggers a change of this application.
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.change() + " is already in progress");
            application = application.withChange(change);
            if (change.application().isPresent())
                application = application.withOutstandingChange(Change.empty());
            applications().store(application);
        });
    }

    /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            applications().store(application.withChange(application.change().application()
                                                                   .filter(__ -> keepApplicationChange)
                                                                   .map(Change::of)
                                                                   .orElse(Change.empty())));
        });
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns the last successful run of the given job whose targets match the given versions, if any. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(run -> targetsMatch(versions, run));
    }

    /** Returns the deployment, if any, in the zone the given job deploys to. */
    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
    }

    /** Returns the greater of the two given optionals, or whichever one is present. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .notPullRequest()
                              .withProjectId()
                              .deploying()
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    /** Returns the production jobs of the given application which are currently running. */
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    private boolean isRunning(Application application, JobType jobType) {
        // NOTE(review): lastTriggered().get() assumes a completed run implies an earlier triggering — confirm.
        return ! application.deploymentJobs().statusOf(jobType)
                            .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                            .orElse(false)
               && jobStateIsAmong(application, jobType, running, queued);
    }

    /** Returns whether the build service reports the given job to be in one of the given states. */
    private boolean jobStateIsAmong(Application application, JobType jobType, JobState... states) {
        return Arrays.asList(states).contains(buildService.stateOf(BuildJob.of(application.id(),
                                                                               application.deploymentJobs().projectId().getAsLong(),
                                                                               jobType.jobName())));
    }

    /**
     * Returns the instant when the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
        Versions versions = versions(application, change, deploymentFor(application, jobType));
        Optional<JobRun> lastSuccess = successOn(application, jobType, versions);
        if (lastSuccess.isPresent() || ! jobType.isProduction())
            return lastSuccess.map(JobRun::at);
        return deploymentFor(application, jobType)
                .filter(deployment -> ! isUpgrade(change, deployment) && isDowngrade(application.change(), deployment))
                .map(Deployment::at);
    }

    /** Returns whether the given change upgrades the platform or application of the given deployment. */
    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    /** Returns whether the given change downgrades the platform or application of the given deployment. */
    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    private boolean isTested(Application application, Versions versions) {
        return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions);
    }

    /** Returns the later of the instants at which both system and staging test succeeded on the given versions, if both did. */
    private Optional<Instant> testedAt(Application application, Versions versions) {
        Optional<JobRun> testRun = successOn(application, systemTest, versions);
        Optional<JobRun> stagingRun = successOn(application, stagingTest, versions)
                .filter(run -> sourcesMatchIfPresent(versions, run));
        return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at))
                .filter(__ -> testRun.isPresent() && stagingRun.isPresent());
    }

    /** Returns whether any production job has already been triggered with the given versions. */
    private boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(run -> targetsMatch(versions, run))
                                              .filter(run -> sourcesMatchIfPresent(versions, run))
                                              .isPresent());
    }

    /** If the given state's sources are present and differ from its targets, returns whether they are equal to those
     *  of the given job run. */
    private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) {
        return (   ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent()
                || versions.sourcePlatform.equals(jobRun.sourcePlatform()))
               && (   ! versions.sourceApplication.filter(version -> ! version.equals(versions.targetApplication)).isPresent()
                   || versions.sourceApplication.equals(jobRun.sourceApplication()));
    }

    /** Returns whether the given job run's targets are exactly the given versions' targets. */
    private static boolean targetsMatch(Versions versions, JobRun jobRun) {
        return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application());
    }

    /** A new application version is rolled out immediately unless a platform-only change is already in progress without failures. */
    private boolean acceptNewApplicationVersion(Application application) {
        if (application.change().application().isPresent()) return true;
        if (application.deploymentJobs().hasFailures()) return true;
        return ! application.changeAt(clock.instant()).platform().isPresent();
    }

    /** Returns the given application's change with the platform and/or application part removed, if complete on all relevant jobs. */
    private Change remainingChange(Application application) {
        List<JobType> jobs = productionStepsOf(application).isEmpty()
                ? jobsOf(testStepsOf(application))
                : jobsOf(productionStepsOf(application));
        Change change = application.change();
        if (jobs.stream().allMatch(job -> completedAt(application.change().withoutApplication(), application, job).isPresent()))
            change = change.withoutPlatform();
        if (jobs.stream().allMatch(job -> completedAt(application.change().withoutPlatform(), application, job).isPresent()))
            change = change.withoutApplication();
        return change;
    }

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : jobsOf(testStepsOf(application))) {
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest);
            if ( ! completion.isPresent() && jobStateIsAmong(application, jobType, idle))
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
        }
        return jobs;
    }

    /** Maps the zones of the given steps to their job types, in order. */
    private List<JobType> jobsOf(Collection<Step> steps) {
        return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList());
    }

    /** Returns the test steps of the application's deployment spec; a lone test zone when the spec declares no steps. */
    private List<Step> testStepsOf(Application application) {
        return application.deploymentSpec().steps().isEmpty()
                ? singletonList(new DeploymentSpec.DeclaredZone(test))
                : application.deploymentSpec().steps().stream()
                             .filter(step -> step.deploysTo(test) || step.deploysTo(staging))
                             .collect(toList());
    }

    /** Returns the production steps of the application's deployment spec, including zone-less (delay) steps. */
    private List<Step> productionStepsOf(Application application) {
        return application.deploymentSpec().steps().stream()
                          .filter(step -> step.deploysTo(prod) || step.zones().isEmpty())
                          .collect(toList());
    }

    /** Creates a deployment job for the given job type, marking it a retry if its last error was out-of-capacity. */
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
                                     .filter(JobError.outOfCapacity::equals).isPresent();
        if (isRetry) reason += "; retrying on out of capacity";
        JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication,
                                              versions.sourcePlatform, versions.sourceApplication,
                                              reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    /** Resolves source and target platform and application versions for the given change and (optional) existing deployment. */
    private Versions versions(Application application, Change change, Optional<Deployment> deployment) {
        return new Versions(targetPlatform(application, change, deployment),
                            targetApplication(application, change, deployment),
                            deployment.map(Deployment::version),
                            deployment.map(Deployment::applicationVersion));
    }

    private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
        return max(deployment.map(Deployment::version), change.platform())
                .orElse(application.oldestDeployedPlatform()
                                   .orElse(controller.systemVersion()));
    }

    private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
        // NOTE(review): the final get()s assume a successful component run exists when nothing is deployed — confirm.
        return max(deployment.map(Deployment::applicationVersion), change.application())
                .orElse(application.oldestDeployedApplication()
                                   .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application()));
    }

    /** A triggerable job, with the triggering run and ordering metadata used by {@code triggerReadyJobs}. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince;
        private final boolean isRetry;
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

    /** Source and target platform and application versions for a job run. */
    private static class Versions {

        private final Version targetPlatform;
        private final ApplicationVersion targetApplication;
        private final Optional<Version> sourcePlatform;
        private final Optional<ApplicationVersion> sourceApplication;

        private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
            this.targetPlatform = targetPlatform;
            this.targetApplication = targetApplication;
            this.sourcePlatform = sourcePlatform;
            this.sourceApplication = sourceApplication;
        }

        @Override
        public String toString() {
            // Sources are shown only when they differ from the corresponding target.
            return String.format("platform %s%s, application %s%s",
                                 sourcePlatform.filter(source -> ! source.equals(targetPlatform))
                                               .map(source -> source + " -> ").orElse(""),
                                 targetPlatform,
                                 sourceApplication.filter(source -> ! source.equals(targetApplication))
                                                  .map(source -> source.id() + " -> ").orElse(""),
                                 targetApplication.id());
        }

    }

}
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentOrder order; private final BuildService buildService; public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.order = new DeploymentOrder(controller::system); this.buildService = buildService; } public DeploymentOrder deploymentOrder() { return order; } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional .empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application)) application = application.withChange(application.change().with(applicationVersion)) .withOutstandingChange(Change.empty()); else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application)); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attemps. */ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { buildService.trigger(job); applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(job.jobType, job.triggering))); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. 
*/ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = versions(application, application.change(), deploymentFor(application, jobType)); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty()))); }); } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(run -> targetsMatch(versions, run)); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get())); } private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! 
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && EnumSet.of(running, queued).contains(jobStateOf(application, jobType)); } private JobState jobStateOf(Application application, JobType jobType) { return buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); } /** * Returns whether the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ private boolean isComplete(Change change, Application application, JobType jobType) { Optional<Deployment> existingDeployment = deploymentFor(application, jobType); return successOn(application, jobType, versions(application, change, existingDeployment)).isPresent() || jobType.isProduction() && existingDeployment.map(deployment -> ! 
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .orElse(false); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions); } private Optional<Instant> testedAt(Application application, Versions versions) { Optional<JobRun> testRun = successOn(application, systemTest, versions); Optional<JobRun> stagingRun = successOn(application, stagingTest, versions) .filter(run -> sourcesMatchIfPresent(versions, run)); return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at)) .filter(__ -> testRun.isPresent() && stagingRun.isPresent()); } private boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(run -> targetsMatch(versions, run)) .filter(run -> sourcesMatchIfPresent(versions, run)) .isPresent()); } /** If the given state's sources are present and differ from its targets, returns whether they are equal to those * of the given job run. */ private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) { return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent() || versions.sourcePlatform.equals(jobRun.sourcePlatform())) && ( ! versions.sourceApplication.filter(version -> ! 
version.equals(versions.targetApplication)).isPresent() || versions.sourceApplication.equals(jobRun.sourceApplication())); } private static boolean targetsMatch(Versions versions, JobRun jobRun) { return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application()); } private boolean acceptNewApplicationVersion(Application application) { if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.changeAt(clock.instant()).platform().isPresent(); } private Change remainingChange(Application application) { List<JobType> jobs = productionStepsOf(application).isEmpty() ? jobsOf(testStepsOf(application)) : jobsOf(productionStepsOf(application)); Change change = application.change(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job))) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job))) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : jobsOf(testStepsOf(application))) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest); if ( ! 
completion.isPresent() && jobStateOf(application, jobType) == idle) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private List<JobType> jobsOf(Collection<Step> steps) { return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList()); } private List<Step> testStepsOf(Application application) { return application.deploymentSpec().steps().isEmpty() ? singletonList(new DeploymentSpec.DeclaredZone(test)) : application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(test) || step.deploysTo(staging)) .collect(toList()); } private List<Step> productionStepsOf(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(prod) || step.zones().isEmpty()) .collect(toList()); } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError) .filter(JobError.outOfCapacity::equals).isPresent(); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication, versions.sourcePlatform, versions.sourceApplication, reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private Versions versions(Application application, Change change, Optional<Deployment> deployment) { return new Versions(targetPlatform(application, change, deployment), targetApplication(application, change, deployment), deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion)); } private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::version), change.platform()) 
.orElse(application.oldestDeployedPlatform() .orElse(controller.systemVersion())); } private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::applicationVersion), change.application()) .orElse(application.oldestDeployedApplication() .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application())); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } private static class Versions { private final Version targetPlatform; private final ApplicationVersion targetApplication; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) { this.targetPlatform = targetPlatform; this.targetApplication = targetApplication; this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; } @Override public String toString() { return String.format("platform %s%s, application %s%s", sourcePlatform.filter(source -> ! 
source.equals(targetPlatform)) .map(source -> source + " -> ").orElse(""), targetPlatform, sourceApplication.filter(source -> ! source.equals(targetApplication)) .map(source -> source.id() + " -> ").orElse(""), targetApplication.id()); } } }
This is a large code base, and code should be readable by developers other than the author. That sometimes requires us to write less esoteric code. I do understand the finer details of why IntelliJ complains — type erasure, and your semantics argument — but that's beside the point. Yesterday proved that this code was problematic for others to read, and I think that's sufficient reason to improve it.
/**
 * Computes the jobs that are ready to be triggered for the given application, if it exists.
 *
 * While the application has a change under way, its production steps are traversed in the order
 * given by the deployment spec; jobs for which the change is not yet complete are candidates for
 * triggering, gated on the required tests having passed and the preceding step having completed.
 * Test jobs that still need to run for the change are collected along the way.
 */
private List<Job> computeReadyJobs(ApplicationId id) {
    List<Job> jobs = new ArrayList<>();
    applications().get(id).ifPresent(application -> {
        Change change = application.changeAt(clock.instant());
        // Most recent success of systemTest or stagingTest, if either is present (see max());
        // this gates the first production step. The explicit <Instant> type witness is for the
        // compiler's benefit — TODO confirm it is still required.
        Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                            application.deploymentJobs().statusOf(stagingTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
        String reason = "New change available";
        List<Job> testJobs = null; // null means "not yet decided" — deliberately distinct from emptyList()
        if (change.isPresent())
            for (Step step : productionStepsOf(application)) {
                Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
                // Group this step's jobs by the instant the change completed for them;
                // the Optional.empty() key holds the jobs which are not yet complete.
                Map<Optional<Instant>, List<JobType>> jobsByCompletion =
                        stepJobs.stream().collect(groupingBy(job -> completedAt(change, application, job)));
                if (jobsByCompletion.containsKey(Optional.empty())) { // Step not complete: consider triggering its remaining jobs.
                    for (JobType job : jobsByCompletion.get(Optional.empty())) {
                        Versions versions = versions(application, change, deploymentFor(application, job));
                        if (isTested(application, versions)) {
                            // Trigger only when the previous step has completed, this job is idle,
                            // and no production jobs outside this step are still running.
                            if (   completedAt.isPresent()
                                && jobStateIsAmong(application, job, idle)
                                && stepJobs.containsAll(runningProductionJobs(application)))
                                jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                            if ( ! alreadyTriggered(application, versions))
                                testJobs = emptyList();
                        }
                        else if (testJobs == null) {
                            testJobs = testJobs(application, versions,
                                                String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()),
                                                completedAt.orElse(clock.instant()));
                        }
                    }
                    completedAt = Optional.empty(); // Later steps must wait for this one to complete.
                }
                else {
                    if (stepJobs.isEmpty()) { // A declared delay step: push the gating instant forward by the delay.
                        Duration delay = ((DeploymentSpec.Delay) step).duration();
                        completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                        reason += " after a delay of " + delay;
                    }
                    else { // All jobs in this step are complete: the latest completion gates the next step.
                        completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
                        reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                    }
                }
            }
        if (testJobs == null) // Nothing in prod needed tests; test the latest change outside prod anyway.
            testJobs = testJobs(application, versions(application, application.change(), Optional.empty()),
                                "Testing last changes outside prod", clock.instant());
        jobs.addAll(testJobs);
    });
    return jobs;
}
if (jobsByCompletion.containsKey(Optional.empty())) {
/**
 * Computes the jobs that are ready to be triggered for the given application, if it exists.
 *
 * While the application has a change under way, its production steps are traversed in the order
 * given by the deployment spec; jobs for which the change is not yet complete are candidates for
 * triggering, gated on the required tests having passed and the preceding step having completed.
 * Test jobs that still need to run for the change are collected along the way.
 */
private List<Job> computeReadyJobs(ApplicationId id) {
    List<Job> jobs = new ArrayList<>();
    applications().get(id).ifPresent(application -> {
        Change change = application.changeAt(clock.instant());
        // Most recent success of systemTest or stagingTest, if either is present (see max());
        // this gates the first production step. The explicit <Instant> type witness is for the
        // compiler's benefit — TODO confirm it is still required.
        Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                            application.deploymentJobs().statusOf(stagingTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
        String reason = "New change available";
        List<Job> testJobs = null; // null means "not yet decided" — deliberately distinct from emptyList()
        if (change.isPresent())
            for (Step step : productionStepsOf(application)) {
                Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
                // The jobs of this step for which the change has not yet completed.
                List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! isComplete(change, application, job)).collect(toList());
                if ( ! remainingJobs.isEmpty()) { // Step not complete: consider triggering its remaining jobs.
                    for (JobType job : remainingJobs) {
                        Versions versions = versions(application, change, deploymentFor(application, job));
                        if (isTested(application, versions)) {
                            // Trigger only when the previous step has completed, this job is idle,
                            // and no production jobs outside this step are still running.
                            if (   completedAt.isPresent()
                                && jobStateOf(application, job) == idle
                                && stepJobs.containsAll(runningProductionJobs(application)))
                                jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                            if ( ! alreadyTriggered(application, versions))
                                testJobs = emptyList();
                        }
                        else if (testJobs == null) {
                            testJobs = testJobs(application, versions,
                                                String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()),
                                                completedAt.orElse(clock.instant()));
                        }
                    }
                    completedAt = Optional.empty(); // Later steps must wait for this one to complete.
                }
                else {
                    if (stepJobs.isEmpty()) { // A declared delay step: push the gating instant forward by the delay.
                        Duration delay = ((DeploymentSpec.Delay) step).duration();
                        completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                        reason += " after a delay of " + delay;
                    }
                    else { // All jobs in this step are complete: the latest completion gates the next step.
                        // NOTE(review): unguarded get() on statusOf()/lastCompleted() — relies on
                        // isComplete(...) implying a recorded, completed run for every job; confirm.
                        completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder());
                        reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                    }
                }
            }
        if (testJobs == null) // Nothing in prod needed tests; test the latest change outside prod anyway.
            testJobs = testJobs(application, versions(application, application.change(), Optional.empty()),
                                "Testing last changes outside prod", clock.instant());
        jobs.addAll(testJobs);
    });
    return jobs;
}
/**
 * Schedules deployment jobs for applications: records job completion reports, computes which
 * jobs are ready to run for each application's current change, and triggers them through the
 * build service.
 *
 * NOTE(review): this excerpt references computeReadyJobs(ApplicationId) (via
 * {@code map(this::computeReadyJobs)}) which is not defined within it — it lives elsewhere.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;             // injected for testable time
    private final DeploymentOrder order;   // maps deployment-spec zones to job types
    private final BuildService buildService;

    public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
        Objects.requireNonNull(controller, "controller cannot be null");
        Objects.requireNonNull(curator, "curator cannot be null");
        Objects.requireNonNull(clock, "clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.order = new DeploymentOrder(controller::system);
        this.buildService = buildService;
    }

    public DeploymentOrder deploymentOrder() { return order; }

    /**
     * Records information when a job completes (successfully or not). This information is used when
     * deciding what to trigger next.
     */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).",
                                             report.jobError().map(e -> e.toString() + " error")
                                                   .orElse("success"),
                                             report.jobType(), report.applicationId(), report.projectId()));
        if ( ! applications().get(report.applicationId()).isPresent()) {
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                // The component (build) job is triggered externally, so synthesize its triggering record here.
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
                triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional.empty(),
                                               Optional.empty(), "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application))
                        application = application.withChange(application.change().with(applicationVersion))
                                                 .withOutstandingChange(Change.empty());
                    else // Park the new version until the current change can accept it.
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered)
                                        .orElseThrow(() -> new IllegalStateException("Notified of completion of " +
                                                                                     report.jobType().jobName() + " for " +
                                                                                     report.applicationId() +
                                                                                     ", but that has neither been triggered nor deployed"));
            }
            application = application.withJobCompletion(report.projectId(), report.jobType(),
                                                        triggering.completion(report.buildNumber(), clock.instant()),
                                                        report.jobError());
            application = application.withChange(remainingChange(application));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /**
     * Finds and triggers jobs that can and should run but are currently not, and returns the number
     * of triggered jobs.
     *
     * Only one job is triggered each run for test jobs, since their environments have limited capacity.
     */
    public long triggerReadyJobs() {
        return computeReadyJobs().stream()
                // Split ready jobs: test jobs are grouped by job type (shared, capacity-limited
                // environments); other jobs are grouped by application.
                .collect(partitioningBy(job -> job.jobType().isTest()))
                .entrySet().stream()
                .flatMap(entry -> (entry.getKey()
                                           ? entry.getValue().stream()
                                                  .sorted(comparing(Job::isRetry)
                                                                  .thenComparing(Job::applicationUpgrade)
                                                                  .reversed()
                                                                  .thenComparing(Job::availableSince))
                                                  .collect(groupingBy(Job::jobType))
                                           : entry.getValue().stream()
                                                  .collect(groupingBy(Job::applicationId)))
                        .values().stream()
                        // Wrap each group's triggering in a Supplier so the groups run in parallel below;
                        // limit(1) enforces "one test job per type per run".
                        .map(jobs -> (Supplier<Long>) jobs.stream()
                                                          .filter(this::trigger)
                                                          .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /**
     * Attempts to trigger the given job for the given application and returns the outcome.
     *
     * If the build service can not find the given job, or claims it is illegal to trigger it,
     * the project id is removed from the application owning the job, to prevent further trigger attempts.
     */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            buildService.trigger(job);
            applications().lockOrThrow(job.applicationId(), application ->
                    applications().store(application.withJobTriggering(job.jobType, job.triggering)));
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
                applications().lockOrThrow(job.applicationId(), application ->
                        applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = versions(application, application.change(), deploymentFor(application, jobType));
        String reason = "Job triggered manually by " + user;
        // An untested production job requires its test jobs first; otherwise trigger the job itself.
        return (jobType.isProduction() && ! isTested(application, versions)
                        ? testJobs(application, versions, reason, clock.instant()).stream()
                        : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant())))
                .peek(this::trigger)
                .map(Job::jobType).collect(toList());
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.change() + " is already in progress");
            application = application.withChange(change);
            if (change.application().isPresent())
                application = application.withOutstandingChange(Change.empty());
            applications().store(application);
        });
    }

    /**
     * Cancels a platform upgrade of the given application, and an application upgrade as well if
     * {@code keepApplicationChange}.
     */
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            applications().store(application.withChange(application.change().application()
                                                                   .filter(__ -> keepApplicationChange)
                                                                   .map(Change::of)
                                                                   .orElse(Change.empty())));
        });
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns the last success of the given job, provided it targeted the given versions. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(run -> targetsMatch(versions, run));
    }

    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
    }

    /** Returns the greater of the two given values; when only one is present, that one. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .notPullRequest()
                              .withProjectId()
                              .deploying()
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    /** Returns the production jobs of the given application which are currently running. */
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /**
     * Returns whether the given job is currently running; false if completed since last triggered,
     * asking the build service otherwise.
     */
    private boolean isRunning(Application application, JobType jobType) {
        return ! application.deploymentJobs().statusOf(jobType)
                            .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                            .orElse(false)
               && jobStateIsAmong(application, jobType, running, queued);
    }

    /** Returns whether the build service reports the given job to be in one of the given states. */
    private boolean jobStateIsAmong(Application application, JobType jobType, JobState... states) {
        return Arrays.asList(states).contains(buildService.stateOf(BuildJob.of(application.id(),
                                                                               application.deploymentJobs().projectId().getAsLong(),
                                                                               jobType.jobName())));
    }

    /**
     * Returns the instant when the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
        Versions versions = versions(application, change, deploymentFor(application, jobType));
        Optional<JobRun> lastSuccess = successOn(application, jobType, versions);
        if (lastSuccess.isPresent() || ! jobType.isProduction())
            return lastSuccess.map(JobRun::at);
        return deploymentFor(application, jobType)
                .filter(deployment -> ! isUpgrade(change, deployment) && isDowngrade(application.change(), deployment))
                .map(Deployment::at);
    }

    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    private boolean isTested(Application application, Versions versions) {
        return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions);
    }

    /** Returns the instant at which both test suites succeeded on the given versions, if they did. */
    private Optional<Instant> testedAt(Application application, Versions versions) {
        Optional<JobRun> testRun = successOn(application, systemTest, versions);
        Optional<JobRun> stagingRun = successOn(application, stagingTest, versions)
                .filter(run -> sourcesMatchIfPresent(versions, run));
        return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at))
                .filter(__ -> testRun.isPresent() && stagingRun.isPresent()); // both must have succeeded
    }

    private boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(run -> targetsMatch(versions, run))
                                              .filter(run -> sourcesMatchIfPresent(versions, run))
                                              .isPresent());
    }

    /**
     * If the given state's sources are present and differ from its targets, returns whether they are
     * equal to those of the given job run.
     */
    private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) {
        return (   ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent()
                || versions.sourcePlatform.equals(jobRun.sourcePlatform()))
               && (   ! versions.sourceApplication.filter(version -> ! version.equals(versions.targetApplication)).isPresent()
                || versions.sourceApplication.equals(jobRun.sourceApplication()));
    }

    private static boolean targetsMatch(Versions versions, JobRun jobRun) {
        return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application());
    }

    /** Whether a newly built application version may replace or join the current change. */
    private boolean acceptNewApplicationVersion(Application application) {
        if (application.change().application().isPresent()) return true; // Replacing a previous application change is ok.
        if (application.deploymentJobs().hasFailures()) return true;     // Allow changes to fix upgrade problems.
        return ! application.changeAt(clock.instant()).platform().isPresent();
    }

    /** Removes the platform or application part of the change when all relevant jobs have completed it. */
    private Change remainingChange(Application application) {
        List<JobType> jobs = productionStepsOf(application).isEmpty()
                ? jobsOf(testStepsOf(application))
                : jobsOf(productionStepsOf(application));
        Change change = application.change();
        if (jobs.stream().allMatch(job -> completedAt(application.change().withoutApplication(), application, job).isPresent()))
            change = change.withoutPlatform();
        if (jobs.stream().allMatch(job -> completedAt(application.change().withoutPlatform(), application, job).isPresent()))
            change = change.withoutApplication();
        return change;
    }

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given versions
     * for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : jobsOf(testStepsOf(application))) {
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest);
            if ( ! completion.isPresent() && jobStateIsAmong(application, jobType, idle))
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
        }
        return jobs;
    }

    private List<JobType> jobsOf(Collection<Step> steps) {
        return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList());
    }

    /** The test steps of the deployment spec; a lone system test when the spec declares no steps. */
    private List<Step> testStepsOf(Application application) {
        return application.deploymentSpec().steps().isEmpty()
                ? singletonList(new DeploymentSpec.DeclaredZone(test))
                : application.deploymentSpec().steps().stream()
                             .filter(step -> step.deploysTo(test) || step.deploysTo(staging))
                             .collect(toList());
    }

    /** The production (or zone-less, i.e. delay) steps of the deployment spec. */
    private List<Step> productionStepsOf(Application application) {
        return application.deploymentSpec().steps().stream()
                          .filter(step -> step.deploysTo(prod) || step.zones().isEmpty())
                          .collect(toList());
    }

    /** Creates a triggerable Job for the given job type; flags out-of-capacity failures as retries. */
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
                                     .filter(JobError.outOfCapacity::equals).isPresent();
        if (isRetry) reason += "; retrying on out of capacity";
        JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication,
                                              versions.sourcePlatform, versions.sourceApplication,
                                              reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    private Versions versions(Application application, Change change, Optional<Deployment> deployment) {
        return new Versions(targetPlatform(application, change, deployment),
                            targetApplication(application, change, deployment),
                            deployment.map(Deployment::version),
                            deployment.map(Deployment::applicationVersion));
    }

    /** Target platform: max of deployed and change versions, else oldest deployed, else system version. */
    private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
        return max(deployment.map(Deployment::version), change.platform())
                .orElse(application.oldestDeployedPlatform()
                                   .orElse(controller.systemVersion()));
    }

    /** Target application version, with the component job's last success as the final fallback. */
    private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
        return max(deployment.map(Deployment::applicationVersion), change.application())
                .orElse(application.oldestDeployedApplication()
                                   .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application()));
    }

    /** A job to trigger, with the triggering metadata used for ordering and bookkeeping. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince;
        private final boolean isRetry;
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

    /** The target (and optional source) platform and application versions of a job run. */
    private static class Versions {

        private final Version targetPlatform;
        private final ApplicationVersion targetApplication;
        private final Optional<Version> sourcePlatform;
        private final Optional<ApplicationVersion> sourceApplication;

        private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
            this.targetPlatform = targetPlatform;
            this.targetApplication = targetApplication;
            this.sourcePlatform = sourcePlatform;
            this.sourceApplication = sourceApplication;
        }

        @Override
        public String toString() {
            // Sources are shown only when present and different from their targets.
            return String.format("platform %s%s, application %s%s",
                                 sourcePlatform.filter(source -> ! source.equals(targetPlatform))
                                               .map(source -> source + " -> ").orElse(""),
                                 targetPlatform,
                                 sourceApplication.filter(source -> ! source.equals(targetApplication))
                                                  .map(source -> source.id() + " -> ").orElse(""),
                                 targetApplication.id());
        }

    }

}
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentOrder order; private final BuildService buildService; public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.order = new DeploymentOrder(controller::system); this.buildService = buildService; } public DeploymentOrder deploymentOrder() { return order; } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional .empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application)) application = application.withChange(application.change().with(applicationVersion)) .withOutstandingChange(Change.empty()); else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application)); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attemps. */ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { buildService.trigger(job); applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(job.jobType, job.triggering))); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. 
*/ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = versions(application, application.change(), deploymentFor(application, jobType)); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty()))); }); } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(run -> targetsMatch(versions, run)); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get())); } private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! 
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && EnumSet.of(running, queued).contains(jobStateOf(application, jobType)); } private JobState jobStateOf(Application application, JobType jobType) { return buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); } /** * Returns whether the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ private boolean isComplete(Change change, Application application, JobType jobType) { Optional<Deployment> existingDeployment = deploymentFor(application, jobType); return successOn(application, jobType, versions(application, change, existingDeployment)).isPresent() || jobType.isProduction() && existingDeployment.map(deployment -> ! 
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .orElse(false); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions); } private Optional<Instant> testedAt(Application application, Versions versions) { Optional<JobRun> testRun = successOn(application, systemTest, versions); Optional<JobRun> stagingRun = successOn(application, stagingTest, versions) .filter(run -> sourcesMatchIfPresent(versions, run)); return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at)) .filter(__ -> testRun.isPresent() && stagingRun.isPresent()); } private boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(run -> targetsMatch(versions, run)) .filter(run -> sourcesMatchIfPresent(versions, run)) .isPresent()); } /** If the given state's sources are present and differ from its targets, returns whether they are equal to those * of the given job run. */ private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) { return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent() || versions.sourcePlatform.equals(jobRun.sourcePlatform())) && ( ! versions.sourceApplication.filter(version -> ! 
version.equals(versions.targetApplication)).isPresent() || versions.sourceApplication.equals(jobRun.sourceApplication())); } private static boolean targetsMatch(Versions versions, JobRun jobRun) { return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application()); } private boolean acceptNewApplicationVersion(Application application) { if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.changeAt(clock.instant()).platform().isPresent(); } private Change remainingChange(Application application) { List<JobType> jobs = productionStepsOf(application).isEmpty() ? jobsOf(testStepsOf(application)) : jobsOf(productionStepsOf(application)); Change change = application.change(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job))) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job))) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : jobsOf(testStepsOf(application))) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest); if ( ! 
completion.isPresent() && jobStateOf(application, jobType) == idle) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private List<JobType> jobsOf(Collection<Step> steps) { return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList()); } private List<Step> testStepsOf(Application application) { return application.deploymentSpec().steps().isEmpty() ? singletonList(new DeploymentSpec.DeclaredZone(test)) : application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(test) || step.deploysTo(staging)) .collect(toList()); } private List<Step> productionStepsOf(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(prod) || step.zones().isEmpty()) .collect(toList()); } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError) .filter(JobError.outOfCapacity::equals).isPresent(); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication, versions.sourcePlatform, versions.sourceApplication, reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private Versions versions(Application application, Change change, Optional<Deployment> deployment) { return new Versions(targetPlatform(application, change, deployment), targetApplication(application, change, deployment), deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion)); } private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::version), change.platform()) 
.orElse(application.oldestDeployedPlatform() .orElse(controller.systemVersion())); } private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::applicationVersion), change.application()) .orElse(application.oldestDeployedApplication() .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application())); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } private static class Versions { private final Version targetPlatform; private final ApplicationVersion targetApplication; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) { this.targetPlatform = targetPlatform; this.targetApplication = targetApplication; this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; } @Override public String toString() { return String.format("platform %s%s, application %s%s", sourcePlatform.filter(source -> ! 
source.equals(targetPlatform)) .map(source -> source + " -> ").orElse(""), targetPlatform, sourceApplication.filter(source -> ! source.equals(targetApplication)) .map(source -> source.id() + " -> ").orElse(""), targetApplication.id()); } } }
Great!
private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.changeAt(clock.instant()); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; if (change.isPresent()) for (Step step : productionStepsOf(application)) { Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); Map<Optional<Instant>, List<JobType>> jobsByCompletion = stepJobs.stream().collect(groupingBy(job -> completedAt(change, application, job))); if (jobsByCompletion.containsKey(Optional.empty())) { for (JobType job : jobsByCompletion.get(Optional.empty())) { Versions versions = versions(application, change, deploymentFor(application, job)); if (isTested(application, versions)) { if ( completedAt.isPresent() && jobStateIsAmong(application, job, idle) && stepJobs.containsAll(runningProductionJobs(application))) jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); if ( ! alreadyTriggered(application, versions)) testJobs = emptyList(); } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElse(clock.instant())); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! 
at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } } if (testJobs == null) testJobs = testJobs(application, versions(application, application.change(), Optional.empty()), "Testing last changes outside prod", clock.instant()); jobs.addAll(testJobs); }); return jobs; }
if (jobsByCompletion.containsKey(Optional.empty())) {
private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.changeAt(clock.instant()); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; if (change.isPresent()) for (Step step : productionStepsOf(application)) { Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! isComplete(change, application, job)).collect(toList()); if ( ! remainingJobs.isEmpty()) { for (JobType job : remainingJobs) { Versions versions = versions(application, change, deploymentFor(application, job)); if (isTested(application, versions)) { if ( completedAt.isPresent() && jobStateOf(application, job) == idle && stepJobs.containsAll(runningProductionJobs(application))) jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); if ( ! alreadyTriggered(application, versions)) testJobs = emptyList(); } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElse(clock.instant())); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! 
at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } } if (testJobs == null) testJobs = testJobs(application, versions(application, application.change(), Optional.empty()), "Testing last changes outside prod", clock.instant()); jobs.addAll(testJobs); }); return jobs; }
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentOrder order; private final BuildService buildService; public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.order = new DeploymentOrder(controller::system); this.buildService = buildService; } public DeploymentOrder deploymentOrder() { return order; } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional .empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application)) application = application.withChange(application.change().with(applicationVersion)) .withOutstandingChange(Change.empty()); else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application)); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attemps. */ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { buildService.trigger(job); applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(job.jobType, job.triggering))); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. 
*/ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = versions(application, application.change(), deploymentFor(application, jobType)); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty()))); }); } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(run -> targetsMatch(versions, run)); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get())); } private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! 
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && jobStateIsAmong(application, jobType, running, queued); } private boolean jobStateIsAmong(Application application, JobType jobType, JobState... states) { return Arrays.asList(states).contains(buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()))); } /** * Returns the instant when the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ private Optional<Instant> completedAt(Change change, Application application, JobType jobType) { Versions versions = versions(application, change, deploymentFor(application, jobType)); Optional<JobRun> lastSuccess = successOn(application, jobType, versions); if (lastSuccess.isPresent() || ! jobType.isProduction()) return lastSuccess.map(JobRun::at); return deploymentFor(application, jobType) .filter(deployment -> ! 
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .map(Deployment::at); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions); } private Optional<Instant> testedAt(Application application, Versions versions) { Optional<JobRun> testRun = successOn(application, systemTest, versions); Optional<JobRun> stagingRun = successOn(application, stagingTest, versions) .filter(run -> sourcesMatchIfPresent(versions, run)); return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at)) .filter(__ -> testRun.isPresent() && stagingRun.isPresent()); } private boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(run -> targetsMatch(versions, run)) .filter(run -> sourcesMatchIfPresent(versions, run)) .isPresent()); } /** If the given state's sources are present and differ from its targets, returns whether they are equal to those * of the given job run. */ private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) { return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent() || versions.sourcePlatform.equals(jobRun.sourcePlatform())) && ( ! versions.sourceApplication.filter(version -> ! 
version.equals(versions.targetApplication)).isPresent() || versions.sourceApplication.equals(jobRun.sourceApplication())); } private static boolean targetsMatch(Versions versions, JobRun jobRun) { return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application()); } private boolean acceptNewApplicationVersion(Application application) { if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.changeAt(clock.instant()).platform().isPresent(); } private Change remainingChange(Application application) { List<JobType> jobs = productionStepsOf(application).isEmpty() ? jobsOf(testStepsOf(application)) : jobsOf(productionStepsOf(application)); Change change = application.change(); if (jobs.stream().allMatch(job -> completedAt(application.change().withoutApplication(), application, job).isPresent())) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> completedAt(application.change().withoutPlatform(), application, job).isPresent())) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : jobsOf(testStepsOf(application))) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest); if ( ! 
completion.isPresent() && jobStateIsAmong(application, jobType, idle)) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private List<JobType> jobsOf(Collection<Step> steps) { return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList()); } private List<Step> testStepsOf(Application application) { return application.deploymentSpec().steps().isEmpty() ? singletonList(new DeploymentSpec.DeclaredZone(test)) : application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(test) || step.deploysTo(staging)) .collect(toList()); } private List<Step> productionStepsOf(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(prod) || step.zones().isEmpty()) .collect(toList()); } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError) .filter(JobError.outOfCapacity::equals).isPresent(); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication, versions.sourcePlatform, versions.sourceApplication, reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private Versions versions(Application application, Change change, Optional<Deployment> deployment) { return new Versions(targetPlatform(application, change, deployment), targetApplication(application, change, deployment), deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion)); } private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::version), change.platform()) 
.orElse(application.oldestDeployedPlatform() .orElse(controller.systemVersion())); } private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::applicationVersion), change.application()) .orElse(application.oldestDeployedApplication() .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application())); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } private static class Versions { private final Version targetPlatform; private final ApplicationVersion targetApplication; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) { this.targetPlatform = targetPlatform; this.targetApplication = targetApplication; this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; } @Override public String toString() { return String.format("platform %s%s, application %s%s", sourcePlatform.filter(source -> ! 
source.equals(targetPlatform)) .map(source -> source + " -> ").orElse(""), targetPlatform, sourceApplication.filter(source -> ! source.equals(targetApplication)) .map(source -> source.id() + " -> ").orElse(""), targetApplication.id()); } } }
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentOrder order; private final BuildService buildService; public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.order = new DeploymentOrder(controller::system); this.buildService = buildService; } public DeploymentOrder deploymentOrder() { return order; } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional .empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application)) application = application.withChange(application.change().with(applicationVersion)) .withOutstandingChange(Change.empty()); else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application)); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attemps. */ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { buildService.trigger(job); applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(job.jobType, job.triggering))); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. 
*/ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = versions(application, application.change(), deploymentFor(application, jobType)); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty()))); }); } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(run -> targetsMatch(versions, run)); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get())); } private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! 
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && EnumSet.of(running, queued).contains(jobStateOf(application, jobType)); } private JobState jobStateOf(Application application, JobType jobType) { return buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); } /** * Returns whether the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ private boolean isComplete(Change change, Application application, JobType jobType) { Optional<Deployment> existingDeployment = deploymentFor(application, jobType); return successOn(application, jobType, versions(application, change, existingDeployment)).isPresent() || jobType.isProduction() && existingDeployment.map(deployment -> ! 
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .orElse(false); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions); } private Optional<Instant> testedAt(Application application, Versions versions) { Optional<JobRun> testRun = successOn(application, systemTest, versions); Optional<JobRun> stagingRun = successOn(application, stagingTest, versions) .filter(run -> sourcesMatchIfPresent(versions, run)); return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at)) .filter(__ -> testRun.isPresent() && stagingRun.isPresent()); } private boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(run -> targetsMatch(versions, run)) .filter(run -> sourcesMatchIfPresent(versions, run)) .isPresent()); } /** If the given state's sources are present and differ from its targets, returns whether they are equal to those * of the given job run. */ private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) { return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent() || versions.sourcePlatform.equals(jobRun.sourcePlatform())) && ( ! versions.sourceApplication.filter(version -> ! 
version.equals(versions.targetApplication)).isPresent() || versions.sourceApplication.equals(jobRun.sourceApplication())); } private static boolean targetsMatch(Versions versions, JobRun jobRun) { return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application()); } private boolean acceptNewApplicationVersion(Application application) { if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.changeAt(clock.instant()).platform().isPresent(); } private Change remainingChange(Application application) { List<JobType> jobs = productionStepsOf(application).isEmpty() ? jobsOf(testStepsOf(application)) : jobsOf(productionStepsOf(application)); Change change = application.change(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job))) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job))) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : jobsOf(testStepsOf(application))) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest); if ( ! 
completion.isPresent() && jobStateOf(application, jobType) == idle) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private List<JobType> jobsOf(Collection<Step> steps) { return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList()); } private List<Step> testStepsOf(Application application) { return application.deploymentSpec().steps().isEmpty() ? singletonList(new DeploymentSpec.DeclaredZone(test)) : application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(test) || step.deploysTo(staging)) .collect(toList()); } private List<Step> productionStepsOf(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(prod) || step.zones().isEmpty()) .collect(toList()); } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError) .filter(JobError.outOfCapacity::equals).isPresent(); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication, versions.sourcePlatform, versions.sourceApplication, reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private Versions versions(Application application, Change change, Optional<Deployment> deployment) { return new Versions(targetPlatform(application, change, deployment), targetApplication(application, change, deployment), deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion)); } private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::version), change.platform()) 
.orElse(application.oldestDeployedPlatform() .orElse(controller.systemVersion())); } private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::applicationVersion), change.application()) .orElse(application.oldestDeployedApplication() .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application())); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } private static class Versions { private final Version targetPlatform; private final ApplicationVersion targetApplication; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) { this.targetPlatform = targetPlatform; this.targetApplication = targetApplication; this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; } @Override public String toString() { return String.format("platform %s%s, application %s%s", sourcePlatform.filter(source -> ! 
source.equals(targetPlatform)) .map(source -> source + " -> ").orElse(""), targetPlatform, sourceApplication.filter(source -> ! source.equals(targetApplication)) .map(source -> source.id() + " -> ").orElse(""), targetApplication.id()); } } }
Could be simplified a bit into something like ```java String clusterName = currentCluster.getName(); currentCluster.getDocumentDefinitions().forEach((documentTypeName, currentDocumentType) -> { (...) }); ``` to avoid having to manually unwrap the key/value from the entry.
/**
 * Validates that no document type present in both the current and the next cluster
 * changes its global distribution setting.
 *
 * @param currentCluster the currently deployed content cluster
 * @param nextCluster    the content cluster of the prepared model
 * @throws IllegalStateException if a document type changes its 'global' attribute
 */
private void validateContentCluster(ContentCluster currentCluster, ContentCluster nextCluster) {
    String clusterName = currentCluster.getName(); // invariant across document types — hoisted out of the loop
    currentCluster.getDocumentDefinitions().forEach((documentTypeName, currentDocumentType) -> {
        NewDocumentType nextDocumentType = nextCluster.getDocumentDefinitions().get(documentTypeName);
        // Document types removed in the next cluster need no validation.
        if (nextDocumentType != null) {
            boolean currentIsGlobal = currentCluster.isGloballyDistributed(currentDocumentType);
            boolean nextIsGlobal = nextCluster.isGloballyDistributed(nextDocumentType);
            if (currentIsGlobal != nextIsGlobal) {
                throw new IllegalStateException(String.format(
                        "Document type %s in cluster %s changed global from %s to %s",
                        documentTypeName, clusterName, currentIsGlobal, nextIsGlobal));
            }
        }
    });
}
NewDocumentType currentDocumentType = currentEntry.getValue();
/**
 * Checks every document type that exists in both clusters and fails if its global
 * distribution flag differs between the current and the next cluster.
 *
 * @throws IllegalStateException if any document type changed its 'global' attribute
 */
private void validateContentCluster(ContentCluster currentCluster, ContentCluster nextCluster) {
    String clusterName = currentCluster.getName();
    currentCluster.getDocumentDefinitions().forEach((typeName, currentType) -> {
        NewDocumentType nextType = nextCluster.getDocumentDefinitions().get(typeName);
        if (nextType == null) return; // document type absent in the next cluster — nothing to check
        boolean wasGlobal = currentCluster.isGloballyDistributed(currentType);
        boolean willBeGlobal = nextCluster.isGloballyDistributed(nextType);
        if (wasGlobal != willBeGlobal) {
            throw new IllegalStateException(String.format("Document type %s in cluster %s changed global from %s to %s",
                                                          typeName, clusterName, wasGlobal, willBeGlobal));
        }
    });
}
/**
 * Validates that no document type changes its global distribution setting between the
 * current and the next model, unless explicitly permitted by a validation override.
 *
 * This validator never produces config change actions; it signals violations by throwing.
 */
class GlobalDocumentChangeValidator implements ChangeValidator {

    @Override
    public List<ConfigChangeAction> validate(VespaModel currentModel, VespaModel nextModel,
                                             ValidationOverrides overrides, Instant now) {
        if (!overrides.allows(ValidationId.globalDocumentChange.value(), now)) {
            for (Map.Entry<String, ContentCluster> currentEntry : currentModel.getContentClusters().entrySet()) {
                ContentCluster nextCluster = nextModel.getContentClusters().get(currentEntry.getKey());
                if (nextCluster == null) continue; // cluster removed in the next model — nothing to compare
                validateContentCluster(currentEntry.getValue(), nextCluster);
            }
        }
        // Return an immutable empty list instead of allocating a fresh mutable ArrayList
        // for a result that is always empty.
        return List.of();
    }
}
/**
 * Change validator which disallows changing the 'global' attribute of a document type
 * in a content cluster, unless a validation override permits it.
 *
 * Produces no config change actions; violations are reported by throwing.
 */
class GlobalDocumentChangeValidator implements ChangeValidator {

    @Override
    public List<ConfigChangeAction> validate(VespaModel currentModel, VespaModel nextModel,
                                             ValidationOverrides overrides, Instant now) {
        boolean changeAllowed = overrides.allows(ValidationId.globalDocumentChange.value(), now);
        if ( ! changeAllowed) {
            currentModel.getContentClusters().forEach((clusterName, currentCluster) -> {
                ContentCluster nextCluster = nextModel.getContentClusters().get(clusterName);
                if (nextCluster != null)
                    validateContentCluster(currentCluster, nextCluster);
            });
        }
        return Collections.emptyList();
    }
}
Of course I agree the code should be readable; I just don't think that was the right change to fix it.
/**
 * Finds the next step(s) to trigger for the given application, if any, and returns these as a list.
 *
 * Walks the production steps in deployment order, carrying the completion instant of the
 * previous step ({@code completedAt}) forward as the availability time for the next step.
 */
private List<Job> computeReadyJobs(ApplicationId id) {
    List<Job> jobs = new ArrayList<>();
    applications().get(id).ifPresent(application -> {
        Change change = application.changeAt(clock.instant());
        // Initial availability: the later of the last successful system test and staging test runs.
        Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                            application.deploymentJobs().statusOf(stagingTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
        String reason = "New change available";
        // null means "not yet decided"; set to emptyList() once the change is already triggered somewhere.
        List<Job> testJobs = null;
        if (change.isPresent())
            for (Step step : productionStepsOf(application)) {
                Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
                // Group this step's jobs by the instant the change completed for them; Optional.empty() = not complete.
                Map<Optional<Instant>, List<JobType>> jobsByCompletion =
                        stepJobs.stream().collect(groupingBy(job -> completedAt(change, application, job)));
                if (jobsByCompletion.containsKey(Optional.empty())) { // Step has incomplete jobs.
                    for (JobType job : jobsByCompletion.get(Optional.empty())) {
                        Versions versions = versions(application, change, deploymentFor(application, job));
                        if (isTested(application, versions)) {
                            // Trigger only when the previous step is done, this job is idle, and no
                            // production jobs outside this step are still running.
                            if ( completedAt.isPresent()
                                 && jobStateIsAmong(application, job, idle)
                                 && stepJobs.containsAll(runningProductionJobs(application)))
                                jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                            if ( ! alreadyTriggered(application, versions))
                                testJobs = emptyList();
                        }
                        else if (testJobs == null) {
                            // Not tested yet: schedule the test jobs needed before this deployment can run.
                            testJobs = testJobs(application, versions,
                                                String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()),
                                                completedAt.orElse(clock.instant()));
                        }
                    }
                    // Stop propagating availability: later steps must wait for this step to complete.
                    completedAt = Optional.empty();
                }
                else {
                    if (stepJobs.isEmpty()) { // A delay step: push the availability time forward by its duration.
                        Duration delay = ((DeploymentSpec.Delay) step).duration();
                        // Availability lapses (becomes empty) while the delay has not yet fully elapsed.
                        completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                        reason += " after a delay of " + delay;
                    }
                    else {
                        // All jobs complete: the step's completion time is the latest of its jobs' completion times.
                        completedAt = jobsByCompletion.keySet().stream().map(Optional::get).max(naturalOrder());
                        reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                    }
                }
            }
        if (testJobs == null)
            testJobs = testJobs(application, versions(application, application.change(), Optional.empty()),
                                "Testing last changes outside prod", clock.instant());
        jobs.addAll(testJobs);
    });
    return jobs;
}
if (jobsByCompletion.containsKey(Optional.empty())) {
/**
 * Finds the next step to trigger for the given application, if any, and returns these as a list.
 *
 * Walks the production steps in deployment order, carrying the completion instant of the
 * previous step ({@code completedAt}) forward as the availability time for the next step.
 */
private List<Job> computeReadyJobs(ApplicationId id) {
    List<Job> jobs = new ArrayList<>();
    applications().get(id).ifPresent(application -> {
        Change change = application.changeAt(clock.instant());
        // Initial availability: the later of the last successful system test and staging test runs.
        Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                            application.deploymentJobs().statusOf(stagingTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
        String reason = "New change available";
        // null means "not yet decided"; set to emptyList() once the change is already triggered somewhere.
        List<Job> testJobs = null;
        if (change.isPresent())
            for (Step step : productionStepsOf(application)) {
                Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet());
                // Jobs in this step for which the change has not yet completed.
                List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! isComplete(change, application, job)).collect(toList());
                if ( ! remainingJobs.isEmpty()) { // Step has incomplete jobs.
                    for (JobType job : remainingJobs) {
                        Versions versions = versions(application, change, deploymentFor(application, job));
                        if (isTested(application, versions)) {
                            // Trigger only when the previous step is done, this job is idle, and no
                            // production jobs outside this step are still running.
                            if ( completedAt.isPresent()
                                 && jobStateOf(application, job) == idle
                                 && stepJobs.containsAll(runningProductionJobs(application)))
                                jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                            if ( ! alreadyTriggered(application, versions))
                                testJobs = emptyList();
                        }
                        else if (testJobs == null) {
                            // Not tested yet: schedule the test jobs needed before this deployment can run.
                            testJobs = testJobs(application, versions,
                                                String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()),
                                                completedAt.orElse(clock.instant()));
                        }
                    }
                    // Stop propagating availability: later steps must wait for this step to complete.
                    completedAt = Optional.empty();
                }
                else {
                    if (stepJobs.isEmpty()) { // A delay step: push the availability time forward by its duration.
                        Duration delay = ((DeploymentSpec.Delay) step).duration();
                        // Availability lapses (becomes empty) while the delay has not yet fully elapsed.
                        completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! at.isAfter(clock.instant()));
                        reason += " after a delay of " + delay;
                    }
                    else {
                        // All jobs complete: use the latest completion time among the step's jobs.
                        // NOTE(review): the unchecked get() calls assume every complete job has a recorded
                        // completed status — verify this invariant holds for all callers.
                        completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder());
                        reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                    }
                }
            }
        if (testJobs == null)
            testJobs = testJobs(application, versions(application, application.change(), Optional.empty()),
                                "Testing last changes outside prod", clock.instant());
        jobs.addAll(testJobs);
    });
    return jobs;
}
/**
 * Maintains the deployment pipeline of applications: records job completions, computes which jobs
 * are ready to run, and triggers them through the build service.
 *
 * NOTE(review): this is one of two variants of this class in this dump; this one models job
 * completion with {@code completedAt(...)} (an {@code Optional<Instant>}) and queries job state
 * through the varargs helper {@code jobStateIsAmong(...)}.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final DeploymentOrder order;
    private final BuildService buildService;

    public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
        Objects.requireNonNull(controller, "controller cannot be null");
        Objects.requireNonNull(curator, "curator cannot be null");
        Objects.requireNonNull(clock, "clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.order = new DeploymentOrder(controller::system);
        this.buildService = buildService;
    }

    public DeploymentOrder deploymentOrder() { return order; }

    /**
     * Records information when a job completes (successfully or not). This information is used when deciding what to
     * trigger next.
     */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).",
                                             report.jobError().map(e -> e.toString() + " error").orElse("success"),
                                             report.jobType(), report.applicationId(), report.projectId()));
        if ( ! applications().get(report.applicationId()).isPresent()) {
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                // The component (build) job is not triggered by this class; synthesize its triggering record.
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
                triggering = JobRun.triggering(controller.systemVersion(), applicationVersion,
                                               Optional.empty(), Optional.empty(), "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application))
                        application = application.withChange(application.change().with(applicationVersion))
                                                 .withOutstandingChange(Change.empty());
                    else
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered)
                                        .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() +
                                                                                     " for " + report.applicationId() +
                                                                                     ", but that has neither been triggered nor deployed"));
            }
            application = application.withJobCompletion(report.projectId(), report.jobType(),
                                                        triggering.completion(report.buildNumber(), clock.instant()),
                                                        report.jobError());
            application = application.withChange(remainingChange(application));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /**
     * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
     *
     * Only one job is triggered each run for test jobs, since their environments have limited capacity.
     */
    public long triggerReadyJobs() {
        return computeReadyJobs().stream()
                .collect(partitioningBy(job -> job.jobType().isTest()))
                .entrySet().stream()
                .flatMap(entry -> (entry.getKey()
                        // Test jobs: sort by (retry, application upgrade) descending, then by
                        // availability, and trigger at most one per job type (limit(1) below).
                        ? entry.getValue().stream()
                               .sorted(comparing(Job::isRetry)
                                               .thenComparing(Job::applicationUpgrade)
                                               .reversed()
                                               .thenComparing(Job::availableSince))
                               .collect(groupingBy(Job::jobType))
                        // Production jobs: trigger all ready jobs, grouped per application.
                        : entry.getValue().stream()
                               .collect(groupingBy(Job::applicationId)))
                        .values().stream()
                        .map(jobs -> (Supplier<Long>) jobs.stream()
                                                          .filter(this::trigger)
                                                          .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /**
     * Attempts to trigger the given job for the given application and returns the outcome.
     *
     * If the build service can not find the given job, or claims it is illegal to trigger it,
     * the project id is removed from the application owning the job, to prevent further trigger attempts.
     */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            buildService.trigger(job);
            applications().lockOrThrow(job.applicationId(), application ->
                    applications().store(application.withJobTriggering(job.jobType, job.triggering)));
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
                applications().lockOrThrow(job.applicationId(), application ->
                        applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = versions(application, application.change(), deploymentFor(application, jobType));
        String reason = "Job triggered manually by " + user;
        // An untested production job is preceded by its required test jobs instead.
        return (jobType.isProduction() && ! isTested(application, versions)
                ? testJobs(application, versions, reason, clock.instant()).stream()
                : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant())))
                .peek(this::trigger)
                .map(Job::jobType).collect(toList());
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.change() + " is already in progress");
            application = application.withChange(change);
            if (change.application().isPresent())
                application = application.withOutstandingChange(Change.empty());
            applications().store(application);
        });
    }

    /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            applications().store(application.withChange(application.change().application()
                                                                   .filter(__ -> keepApplicationChange)
                                                                   .map(Change::of)
                                                                   .orElse(Change.empty())));
        });
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns the last successful run of the given job whose target versions match the given ones. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(run -> targetsMatch(versions, run));
    }

    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
    }

    /** Returns the greater of the two given optionals, or whichever is present when only one is. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .notPullRequest()
                              .withProjectId()
                              .deploying()
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    /**
     * Finds the next step to trigger for the given application, if any, and returns these as a list.
     */
    // NOTE(review): the Javadoc above appears to describe the per-application computeReadyJobs(ApplicationId)
    // method (elided from this dump), not the method below, which lists currently running production jobs.
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    private boolean isRunning(Application application, JobType jobType) {
        return ! application.deploymentJobs().statusOf(jobType)
                            .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                            .orElse(false)
               && jobStateIsAmong(application, jobType, running, queued);
    }

    /** Returns whether the build service reports the given job to be in one of the given states. */
    private boolean jobStateIsAmong(Application application, JobType jobType, JobState... states) {
        return Arrays.asList(states).contains(buildService.stateOf(BuildJob.of(application.id(),
                                                                               application.deploymentJobs().projectId().getAsLong(),
                                                                               jobType.jobName())));
    }

    /**
     * Returns the instant when the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    private Optional<Instant> completedAt(Change change, Application application, JobType jobType) {
        Versions versions = versions(application, change, deploymentFor(application, jobType));
        Optional<JobRun> lastSuccess = successOn(application, jobType, versions);
        if (lastSuccess.isPresent() || ! jobType.isProduction())
            return lastSuccess.map(JobRun::at);
        return deploymentFor(application, jobType)
                .filter(deployment -> ! isUpgrade(change, deployment) && isDowngrade(application.change(), deployment))
                .map(Deployment::at);
    }

    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    /** Returns whether the given versions are already tested, or a matching production job has been triggered. */
    private boolean isTested(Application application, Versions versions) {
        return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions);
    }

    /** Returns the instant both test jobs last succeeded on the given versions, if both have. */
    private Optional<Instant> testedAt(Application application, Versions versions) {
        Optional<JobRun> testRun = successOn(application, systemTest, versions);
        Optional<JobRun> stagingRun = successOn(application, stagingTest, versions)
                .filter(run -> sourcesMatchIfPresent(versions, run));
        return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at))
                .filter(__ -> testRun.isPresent() && stagingRun.isPresent());
    }

    private boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(run -> targetsMatch(versions, run))
                                              .filter(run -> sourcesMatchIfPresent(versions, run))
                                              .isPresent());
    }

    /** If the given state's sources are present and differ from its targets, returns whether they are equal to those
     * of the given job run. */
    private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) {
        return (   ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent()
                || versions.sourcePlatform.equals(jobRun.sourcePlatform()))
               && (   ! versions.sourceApplication.filter(version -> ! version.equals(versions.targetApplication)).isPresent()
                   || versions.sourceApplication.equals(jobRun.sourceApplication()));
    }

    private static boolean targetsMatch(Versions versions, JobRun jobRun) {
        return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application());
    }

    /** A new application version is accepted unless a pure platform change is already rolling out without failures. */
    private boolean acceptNewApplicationVersion(Application application) {
        if (application.change().application().isPresent()) return true;
        if (application.deploymentJobs().hasFailures()) return true;
        return ! application.changeAt(clock.instant()).platform().isPresent();
    }

    /** Returns the part of the application's current change which is not yet complete on all relevant jobs. */
    private Change remainingChange(Application application) {
        List<JobType> jobs = productionStepsOf(application).isEmpty()
                ? jobsOf(testStepsOf(application))
                : jobsOf(productionStepsOf(application));
        Change change = application.change();
        if (jobs.stream().allMatch(job -> completedAt(application.change().withoutApplication(), application, job).isPresent()))
            change = change.withoutPlatform();
        if (jobs.stream().allMatch(job -> completedAt(application.change().withoutPlatform(), application, job).isPresent()))
            change = change.withoutApplication();
        return change;
    }

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : jobsOf(testStepsOf(application))) {
            // System test does not require matching sources; staging test does.
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest);
            if ( ! completion.isPresent() && jobStateIsAmong(application, jobType, idle))
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
        }
        return jobs;
    }

    private List<JobType> jobsOf(Collection<Step> steps) {
        return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList());
    }

    /** Returns the test steps of the application's deployment spec, or a default test step when the spec is empty. */
    private List<Step> testStepsOf(Application application) {
        return application.deploymentSpec().steps().isEmpty()
                ? singletonList(new DeploymentSpec.DeclaredZone(test))
                : application.deploymentSpec().steps().stream()
                             .filter(step -> step.deploysTo(test) || step.deploysTo(staging))
                             .collect(toList());
    }

    /** Returns the production steps of the application's deployment spec, including delay steps (empty zones). */
    private List<Step> productionStepsOf(Application application) {
        return application.deploymentSpec().steps().stream()
                          .filter(step -> step.deploysTo(prod) || step.zones().isEmpty())
                          .collect(toList());
    }

    /** Creates a Job for the given job type, marking it as a retry when the last run failed on out of capacity. */
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
                                     .filter(JobError.outOfCapacity::equals).isPresent();
        if (isRetry) reason += "; retrying on out of capacity";
        JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication,
                                              versions.sourcePlatform, versions.sourceApplication,
                                              reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    /** Computes source (current deployment) and target (change) versions for a job of the given application. */
    private Versions versions(Application application, Change change, Optional<Deployment> deployment) {
        return new Versions(targetPlatform(application, change, deployment),
                            targetApplication(application, change, deployment),
                            deployment.map(Deployment::version),
                            deployment.map(Deployment::applicationVersion));
    }

    private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
        return max(deployment.map(Deployment::version), change.platform())
                .orElse(application.oldestDeployedPlatform()
                                   .orElse(controller.systemVersion()));
    }

    private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
        return max(deployment.map(Deployment::applicationVersion), change.application())
                .orElse(application.oldestDeployedApplication()
                                   .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application()));
    }

    /** A build job enriched with the information needed to prioritize and trigger it. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince;
        private final boolean isRetry;
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince,
                    boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

    /** Source and target platform and application versions for a job run. */
    private static class Versions {

        private final Version targetPlatform;
        private final ApplicationVersion targetApplication;
        private final Optional<Version> sourcePlatform;
        private final Optional<ApplicationVersion> sourceApplication;

        private Versions(Version targetPlatform, ApplicationVersion targetApplication,
                         Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
            this.targetPlatform = targetPlatform;
            this.targetApplication = targetApplication;
            this.sourcePlatform = sourcePlatform;
            this.sourceApplication = sourceApplication;
        }

        @Override
        public String toString() {
            return String.format("platform %s%s, application %s%s",
                                 sourcePlatform.filter(source -> ! source.equals(targetPlatform))
                                               .map(source -> source + " -> ").orElse(""),
                                 targetPlatform,
                                 sourceApplication.filter(source -> ! source.equals(targetApplication))
                                                  .map(source -> source.id() + " -> ").orElse(""),
                                 targetApplication.id());
        }

    }

}
/**
 * Maintains the deployment pipeline of applications: records job completions, computes which jobs
 * are ready to run, and triggers them through the build service.
 *
 * NOTE(review): this is one of two variants of this class in this dump; this one models job
 * completion with the boolean {@code isComplete(...)} and queries job state through
 * {@code jobStateOf(...)}, which returns a {@code JobState}.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final DeploymentOrder order;
    private final BuildService buildService;

    public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) {
        Objects.requireNonNull(controller, "controller cannot be null");
        Objects.requireNonNull(curator, "curator cannot be null");
        Objects.requireNonNull(clock, "clock cannot be null");
        this.controller = controller;
        this.clock = clock;
        this.order = new DeploymentOrder(controller::system);
        this.buildService = buildService;
    }

    public DeploymentOrder deploymentOrder() { return order; }

    /**
     * Records information when a job completes (successfully or not). This information is used when deciding what to
     * trigger next.
     */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).",
                                             report.jobError().map(e -> e.toString() + " error").orElse("success"),
                                             report.jobType(), report.applicationId(), report.projectId()));
        if ( ! applications().get(report.applicationId()).isPresent()) {
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                // The component (build) job is not triggered by this class; synthesize its triggering record.
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
                triggering = JobRun.triggering(controller.systemVersion(), applicationVersion,
                                               Optional.empty(), Optional.empty(), "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application))
                        application = application.withChange(application.change().with(applicationVersion))
                                                 .withOutstandingChange(Change.empty());
                    else
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered)
                                        .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() +
                                                                                     " for " + report.applicationId() +
                                                                                     ", but that has neither been triggered nor deployed"));
            }
            application = application.withJobCompletion(report.projectId(), report.jobType(),
                                                        triggering.completion(report.buildNumber(), clock.instant()),
                                                        report.jobError());
            application = application.withChange(remainingChange(application));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /**
     * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
     *
     * Only one job is triggered each run for test jobs, since their environments have limited capacity.
     */
    public long triggerReadyJobs() {
        return computeReadyJobs().stream()
                .collect(partitioningBy(job -> job.jobType().isTest()))
                .entrySet().stream()
                .flatMap(entry -> (entry.getKey()
                        // Test jobs: sort by (retry, application upgrade) descending, then by
                        // availability, and trigger at most one per job type (limit(1) below).
                        ? entry.getValue().stream()
                               .sorted(comparing(Job::isRetry)
                                               .thenComparing(Job::applicationUpgrade)
                                               .reversed()
                                               .thenComparing(Job::availableSince))
                               .collect(groupingBy(Job::jobType))
                        // Production jobs: trigger all ready jobs, grouped per application.
                        : entry.getValue().stream()
                               .collect(groupingBy(Job::applicationId)))
                        .values().stream()
                        .map(jobs -> (Supplier<Long>) jobs.stream()
                                                          .filter(this::trigger)
                                                          .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /**
     * Attempts to trigger the given job for the given application and returns the outcome.
     *
     * If the build service can not find the given job, or claims it is illegal to trigger it,
     * the project id is removed from the application owning the job, to prevent further trigger attempts.
     */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            buildService.trigger(job);
            applications().lockOrThrow(job.applicationId(), application ->
                    applications().store(application.withJobTriggering(job.jobType, job.triggering)));
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
                applications().lockOrThrow(job.applicationId(), application ->
                        applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = versions(application, application.change(), deploymentFor(application, jobType));
        String reason = "Job triggered manually by " + user;
        // An untested production job is preceded by its required test jobs instead.
        return (jobType.isProduction() && ! isTested(application, versions)
                ? testJobs(application, versions, reason, clock.instant()).stream()
                : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant())))
                .peek(this::trigger)
                .map(Job::jobType).collect(toList());
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            if (application.change().isPresent() && ! application.deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.change() + " is already in progress");
            application = application.withChange(change);
            if (change.application().isPresent())
                application = application.withOutstandingChange(Change.empty());
            applications().store(application);
        });
    }

    /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            applications().store(application.withChange(application.change().application()
                                                                   .filter(__ -> keepApplicationChange)
                                                                   .map(Change::of)
                                                                   .orElse(Change.empty())));
        });
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns the last successful run of the given job whose target versions match the given ones. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(run -> targetsMatch(versions, run));
    }

    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get()));
    }

    /** Returns the greater of the two given optionals, or whichever is present when only one is. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .notPullRequest()
                              .withProjectId()
                              .deploying()
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    /**
     * Finds the next step to trigger for the given application, if any, and returns these as a list.
     */
    // NOTE(review): the Javadoc above appears to describe the per-application computeReadyJobs(ApplicationId)
    // method (elided from this dump), not the method below, which lists currently running production jobs.
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    private boolean isRunning(Application application, JobType jobType) {
        return ! application.deploymentJobs().statusOf(jobType)
                            .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                            .orElse(false)
               && EnumSet.of(running, queued).contains(jobStateOf(application, jobType));
    }

    /** Asks the build service for the current state of the given job. */
    private JobState jobStateOf(Application application, JobType jobType) {
        return buildService.stateOf(BuildJob.of(application.id(),
                                                application.deploymentJobs().projectId().getAsLong(),
                                                jobType.jobName()));
    }

    /**
     * Returns whether the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    private boolean isComplete(Change change, Application application, JobType jobType) {
        Optional<Deployment> existingDeployment = deploymentFor(application, jobType);
        return    successOn(application, jobType, versions(application, change, existingDeployment)).isPresent()
               ||    jobType.isProduction()
                  && existingDeployment.map(deployment -> ! isUpgrade(change, deployment) && isDowngrade(application.change(), deployment))
                                       .orElse(false);
    }

    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    /** Returns whether the given versions are already tested, or a matching production job has been triggered. */
    private boolean isTested(Application application, Versions versions) {
        return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions);
    }

    /** Returns the instant both test jobs last succeeded on the given versions, if both have. */
    private Optional<Instant> testedAt(Application application, Versions versions) {
        Optional<JobRun> testRun = successOn(application, systemTest, versions);
        Optional<JobRun> stagingRun = successOn(application, stagingTest, versions)
                .filter(run -> sourcesMatchIfPresent(versions, run));
        return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at))
                .filter(__ -> testRun.isPresent() && stagingRun.isPresent());
    }

    private boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(run -> targetsMatch(versions, run))
                                              .filter(run -> sourcesMatchIfPresent(versions, run))
                                              .isPresent());
    }

    /** If the given state's sources are present and differ from its targets, returns whether they are equal to those
     * of the given job run. */
    private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) {
        return (   ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent()
                || versions.sourcePlatform.equals(jobRun.sourcePlatform()))
               && (   ! versions.sourceApplication.filter(version -> ! version.equals(versions.targetApplication)).isPresent()
                   || versions.sourceApplication.equals(jobRun.sourceApplication()));
    }

    private static boolean targetsMatch(Versions versions, JobRun jobRun) {
        return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application());
    }

    /** A new application version is accepted unless a pure platform change is already rolling out without failures. */
    private boolean acceptNewApplicationVersion(Application application) {
        if (application.change().application().isPresent()) return true;
        if (application.deploymentJobs().hasFailures()) return true;
        return ! application.changeAt(clock.instant()).platform().isPresent();
    }

    /** Returns the part of the application's current change which is not yet complete on all relevant jobs. */
    private Change remainingChange(Application application) {
        List<JobType> jobs = productionStepsOf(application).isEmpty()
                ? jobsOf(testStepsOf(application))
                : jobsOf(productionStepsOf(application));
        Change change = application.change();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job)))
            change = change.withoutPlatform();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job)))
            change = change.withoutApplication();
        return change;
    }

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : jobsOf(testStepsOf(application))) {
            // System test does not require matching sources; staging test does.
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest);
            if ( ! completion.isPresent() && jobStateOf(application, jobType) == idle)
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
        }
        return jobs;
    }

    private List<JobType> jobsOf(Collection<Step> steps) {
        return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList());
    }

    /** Returns the test steps of the application's deployment spec, or a default test step when the spec is empty. */
    private List<Step> testStepsOf(Application application) {
        return application.deploymentSpec().steps().isEmpty()
                ? singletonList(new DeploymentSpec.DeclaredZone(test))
                : application.deploymentSpec().steps().stream()
                             .filter(step -> step.deploysTo(test) || step.deploysTo(staging))
                             .collect(toList());
    }

    /** Returns the production steps of the application's deployment spec, including delay steps (empty zones). */
    private List<Step> productionStepsOf(Application application) {
        return application.deploymentSpec().steps().stream()
                          .filter(step -> step.deploysTo(prod) || step.zones().isEmpty())
                          .collect(toList());
    }

    /** Creates a Job for the given job type, marking it as a retry when the last run failed on out of capacity. */
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError)
                                     .filter(JobError.outOfCapacity::equals).isPresent();
        if (isRetry) reason += "; retrying on out of capacity";
        JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication,
                                              versions.sourcePlatform, versions.sourceApplication,
                                              reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    /** Computes source (current deployment) and target (change) versions for a job of the given application. */
    private Versions versions(Application application, Change change, Optional<Deployment> deployment) {
        return new Versions(targetPlatform(application, change, deployment),
                            targetApplication(application, change, deployment),
                            deployment.map(Deployment::version),
                            deployment.map(Deployment::applicationVersion));
    }

    private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) {
        return max(deployment.map(Deployment::version), change.platform())
                .orElse(application.oldestDeployedPlatform()
                                   .orElse(controller.systemVersion()));
    }

    private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) {
        return max(deployment.map(Deployment::applicationVersion), change.application())
                .orElse(application.oldestDeployedApplication()
                                   .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application()));
    }

    /** A build job enriched with the information needed to prioritize and trigger it. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince;
        private final boolean isRetry;
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince,
                    boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

    /** Source and target platform and application versions for a job run. */
    private static class Versions {

        private final Version targetPlatform;
        private final ApplicationVersion targetApplication;
        private final Optional<Version> sourcePlatform;
        private final Optional<ApplicationVersion> sourceApplication;

        private Versions(Version targetPlatform, ApplicationVersion targetApplication,
                         Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) {
            this.targetPlatform = targetPlatform;
            this.targetApplication = targetApplication;
            this.sourcePlatform = sourcePlatform;
            this.sourceApplication = sourceApplication;
        }

        @Override
        public String toString() {
            return String.format("platform %s%s, application %s%s",
                                 sourcePlatform.filter(source -> ! source.equals(targetPlatform))
                                               .map(source -> source + " -> ").orElse(""),
                                 targetPlatform,
                                 sourceApplication.filter(source -> ! source.equals(targetApplication))
                                                  .map(source -> source.id() + " -> ").orElse(""),
                                 targetApplication.id());
        }

    }

}
The effective change here is that the last job completion is used instead of the last deployment for jobs which should be skipped. I think this is an improvement, as it ensures a delay between a job's completion and the start of the next job. The downside is that someone could repeatedly trigger a failing job, so that the delay never completes — but I don't believe that will actually be a problem in practice.
private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.changeAt(clock.instant()); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; if (change.isPresent()) for (Step step : productionStepsOf(application)) { Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! isComplete(change, application, job)).collect(toList()); if ( ! remainingJobs.isEmpty()) { for (JobType job : remainingJobs) { Versions versions = versions(application, change, deploymentFor(application, job)); if (isTested(application, versions)) { if ( completedAt.isPresent() && jobStateOf(application, job) == idle && stepJobs.containsAll(runningProductionJobs(application))) jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); if ( ! alreadyTriggered(application, versions)) testJobs = emptyList(); } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElse(clock.instant())); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! 
at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } } if (testJobs == null) testJobs = testJobs(application, versions(application, application.change(), Optional.empty()), "Testing last changes outside prod", clock.instant()); jobs.addAll(testJobs); }); return jobs; }
completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder());
private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.changeAt(clock.instant()); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; if (change.isPresent()) for (Step step : productionStepsOf(application)) { Set<JobType> stepJobs = step.zones().stream().map(order::toJob).collect(toSet()); List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! isComplete(change, application, job)).collect(toList()); if ( ! remainingJobs.isEmpty()) { for (JobType job : remainingJobs) { Versions versions = versions(application, change, deploymentFor(application, job)); if (isTested(application, versions)) { if ( completedAt.isPresent() && jobStateOf(application, job) == idle && stepJobs.containsAll(runningProductionJobs(application))) jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); if ( ! alreadyTriggered(application, versions)) testJobs = emptyList(); } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElse(clock.instant())); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> ! 
at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } } if (testJobs == null) testJobs = testJobs(application, versions(application, application.change(), Optional.empty()), "Testing last changes outside prod", clock.instant()); jobs.addAll(testJobs); }); return jobs; }
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentOrder order; private final BuildService buildService; public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.order = new DeploymentOrder(controller::system); this.buildService = buildService; } public DeploymentOrder deploymentOrder() { return order; } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional .empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application)) application = application.withChange(application.change().with(applicationVersion)) .withOutstandingChange(Change.empty()); else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application)); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attemps. */ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { buildService.trigger(job); applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(job.jobType, job.triggering))); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. 
*/ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = versions(application, application.change(), deploymentFor(application, jobType)); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty()))); }); } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(run -> targetsMatch(versions, run)); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get())); } private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! 
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && EnumSet.of(running, queued).contains(jobStateOf(application, jobType)); } private JobState jobStateOf(Application application, JobType jobType) { return buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); } /** * Returns whether the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ private boolean isComplete(Change change, Application application, JobType jobType) { Optional<Deployment> existingDeployment = deploymentFor(application, jobType); return successOn(application, jobType, versions(application, change, existingDeployment)).isPresent() || jobType.isProduction() && existingDeployment.map(deployment -> ! 
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .orElse(false); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions); } private Optional<Instant> testedAt(Application application, Versions versions) { Optional<JobRun> testRun = successOn(application, systemTest, versions); Optional<JobRun> stagingRun = successOn(application, stagingTest, versions) .filter(run -> sourcesMatchIfPresent(versions, run)); return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at)) .filter(__ -> testRun.isPresent() && stagingRun.isPresent()); } private boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(run -> targetsMatch(versions, run)) .filter(run -> sourcesMatchIfPresent(versions, run)) .isPresent()); } /** If the given state's sources are present and differ from its targets, returns whether they are equal to those * of the given job run. */ private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) { return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent() || versions.sourcePlatform.equals(jobRun.sourcePlatform())) && ( ! versions.sourceApplication.filter(version -> ! 
version.equals(versions.targetApplication)).isPresent() || versions.sourceApplication.equals(jobRun.sourceApplication())); } private static boolean targetsMatch(Versions versions, JobRun jobRun) { return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application()); } private boolean acceptNewApplicationVersion(Application application) { if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.changeAt(clock.instant()).platform().isPresent(); } private Change remainingChange(Application application) { List<JobType> jobs = productionStepsOf(application).isEmpty() ? jobsOf(testStepsOf(application)) : jobsOf(productionStepsOf(application)); Change change = application.change(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job))) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job))) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : jobsOf(testStepsOf(application))) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest); if ( ! 
completion.isPresent() && jobStateOf(application, jobType) == idle) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private List<JobType> jobsOf(Collection<Step> steps) { return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList()); } private List<Step> testStepsOf(Application application) { return application.deploymentSpec().steps().isEmpty() ? singletonList(new DeploymentSpec.DeclaredZone(test)) : application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(test) || step.deploysTo(staging)) .collect(toList()); } private List<Step> productionStepsOf(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(prod) || step.zones().isEmpty()) .collect(toList()); } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError) .filter(JobError.outOfCapacity::equals).isPresent(); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication, versions.sourcePlatform, versions.sourceApplication, reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private Versions versions(Application application, Change change, Optional<Deployment> deployment) { return new Versions(targetPlatform(application, change, deployment), targetApplication(application, change, deployment), deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion)); } private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::version), change.platform()) 
.orElse(application.oldestDeployedPlatform() .orElse(controller.systemVersion())); } private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::applicationVersion), change.application()) .orElse(application.oldestDeployedApplication() .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application())); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } private static class Versions { private final Version targetPlatform; private final ApplicationVersion targetApplication; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) { this.targetPlatform = targetPlatform; this.targetApplication = targetApplication; this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; } @Override public String toString() { return String.format("platform %s%s, application %s%s", sourcePlatform.filter(source -> ! 
source.equals(targetPlatform)) .map(source -> source + " -> ").orElse(""), targetPlatform, sourceApplication.filter(source -> ! source.equals(targetApplication)) .map(source -> source.id() + " -> ").orElse(""), targetApplication.id()); } } }
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final DeploymentOrder order; private final BuildService buildService; public DeploymentTrigger(Controller controller, CuratorDb curator, BuildService buildService, Clock clock) { Objects.requireNonNull(controller, "controller cannot be null"); Objects.requireNonNull(curator, "curator cannot be null"); Objects.requireNonNull(clock, "clock cannot be null"); this.controller = controller; this.clock = clock; this.order = new DeploymentOrder(controller::system); this.buildService = buildService; } public DeploymentOrder deploymentOrder() { return order; } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d).", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(controller.systemVersion(), applicationVersion, Optional .empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application)) application = application.withChange(application.change().with(applicationVersion)) .withOutstandingChange(Change.empty()); else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.deploymentJobs().statusOf(report.jobType()).flatMap(JobStatus::lastTriggered) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application)); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attemps. */ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { buildService.trigger(job); applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withJobTriggering(job.jobType, job.triggering))); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. 
*/ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = versions(application, application.change(), deploymentFor(application, jobType)); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.change().isPresent() && ! application.deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty()))); }); } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(run -> targetsMatch(versions, run)); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()).get())); } private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! 
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && EnumSet.of(running, queued).contains(jobStateOf(application, jobType)); } private JobState jobStateOf(Application application, JobType jobType) { return buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); } /** * Returns whether the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ private boolean isComplete(Change change, Application application, JobType jobType) { Optional<Deployment> existingDeployment = deploymentFor(application, jobType); return successOn(application, jobType, versions(application, change, existingDeployment)).isPresent() || jobType.isProduction() && existingDeployment.map(deployment -> ! 
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .orElse(false); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedAt(application, versions).isPresent() || alreadyTriggered(application, versions); } private Optional<Instant> testedAt(Application application, Versions versions) { Optional<JobRun> testRun = successOn(application, systemTest, versions); Optional<JobRun> stagingRun = successOn(application, stagingTest, versions) .filter(run -> sourcesMatchIfPresent(versions, run)); return max(testRun.map(JobRun::at), stagingRun.map(JobRun::at)) .filter(__ -> testRun.isPresent() && stagingRun.isPresent()); } private boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(run -> targetsMatch(versions, run)) .filter(run -> sourcesMatchIfPresent(versions, run)) .isPresent()); } /** If the given state's sources are present and differ from its targets, returns whether they are equal to those * of the given job run. */ private static boolean sourcesMatchIfPresent(Versions versions, JobRun jobRun) { return ( ! versions.sourcePlatform.filter(version -> ! version.equals(versions.targetPlatform)).isPresent() || versions.sourcePlatform.equals(jobRun.sourcePlatform())) && ( ! versions.sourceApplication.filter(version -> ! 
version.equals(versions.targetApplication)).isPresent() || versions.sourceApplication.equals(jobRun.sourceApplication())); } private static boolean targetsMatch(Versions versions, JobRun jobRun) { return versions.targetPlatform.equals(jobRun.platform()) && versions.targetApplication.equals(jobRun.application()); } private boolean acceptNewApplicationVersion(Application application) { if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.changeAt(clock.instant()).platform().isPresent(); } private Change remainingChange(Application application) { List<JobType> jobs = productionStepsOf(application).isEmpty() ? jobsOf(testStepsOf(application)) : jobsOf(productionStepsOf(application)); Change change = application.change(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job))) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job))) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : jobsOf(testStepsOf(application))) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> sourcesMatchIfPresent(versions, run) || jobType == systemTest); if ( ! 
completion.isPresent() && jobStateOf(application, jobType) == idle) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private List<JobType> jobsOf(Collection<Step> steps) { return steps.stream().flatMap(step -> step.zones().stream()).map(order::toJob).collect(toList()); } private List<Step> testStepsOf(Application application) { return application.deploymentSpec().steps().isEmpty() ? singletonList(new DeploymentSpec.DeclaredZone(test)) : application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(test) || step.deploysTo(staging)) .collect(toList()); } private List<Step> productionStepsOf(Application application) { return application.deploymentSpec().steps().stream() .filter(step -> step.deploysTo(prod) || step.zones().isEmpty()) .collect(toList()); } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::jobError) .filter(JobError.outOfCapacity::equals).isPresent(); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform, versions.targetApplication, versions.sourcePlatform, versions.sourceApplication, reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private Versions versions(Application application, Change change, Optional<Deployment> deployment) { return new Versions(targetPlatform(application, change, deployment), targetApplication(application, change, deployment), deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion)); } private Version targetPlatform(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::version), change.platform()) 
.orElse(application.oldestDeployedPlatform() .orElse(controller.systemVersion())); } private ApplicationVersion targetApplication(Application application, Change change, Optional<Deployment> deployment) { return max(deployment.map(Deployment::applicationVersion), change.application()) .orElse(application.oldestDeployedApplication() .orElse(application.deploymentJobs().jobStatus().get(component).lastSuccess().get().application())); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } private static class Versions { private final Version targetPlatform; private final ApplicationVersion targetApplication; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private Versions(Version targetPlatform, ApplicationVersion targetApplication, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication) { this.targetPlatform = targetPlatform; this.targetApplication = targetApplication; this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; } @Override public String toString() { return String.format("platform %s%s, application %s%s", sourcePlatform.filter(source -> ! 
source.equals(targetPlatform)) .map(source -> source + " -> ").orElse(""), targetPlatform, sourceApplication.filter(source -> ! source.equals(targetApplication)) .map(source -> source.id() + " -> ").orElse(""), targetApplication.id()); } } }
Is the "document" part of the name necessary elsewhere (SignedIdentityDocument -> SignedIdentity)? Sounds like "document" is something one would call a serialized and portable ASCII representation of the identity, which would be perfect for the entity/response/resource, but not internally when deserialized and represented by a proper Java object.
/**
 * Fetches the signed identity document for the given host from the config server and maps it
 * from the wire (bindings) representation to the internal model type.
 *
 * @param host hostname the document is requested for (appended as the last path segment)
 * @param type document type path segment, e.g. "node" or "tenant"
 * @return the deserialized and mapped {@code SignedIdentityDocument}
 * @throws RuntimeException on a non-2xx response, with status code and response body included
 * @throws UncheckedIOException on I/O failure while talking to the config server
 */
private SignedIdentityDocument getIdentityDocument(String host, String type) {
    // A fresh client per call; both client and response are closed by try-with-resources.
    try (CloseableHttpClient client = createHttpClient(sslContextSupplier.get(), hostnameVerifier)) {
        URI uri = configserverUri
                .resolve(IDENTITY_DOCUMENT_API)
                .resolve(type + '/')  // trailing '/' so the following resolve appends rather than replaces
                .resolve(host);
        HttpUriRequest request = RequestBuilder.get()
                .setUri(uri)
                .addHeader("Connection", "close")
                .addHeader("Accept", "application/json")
                .build();
        try (CloseableHttpResponse response = client.execute(request)) {
            // Body is read up front so it is available for the error message as well.
            String responseContent = EntityUtils.toString(response.getEntity());
            if (HttpStatus.isSuccess(response.getStatusLine().getStatusCode())) {
                // Deserialize the bindings (wire) type, then convert to the internal model type below.
                com.yahoo.vespa.athenz.identityprovider.api.bindings.SignedIdentityDocument entity = objectMapper.readValue(
                        responseContent,
                        com.yahoo.vespa.athenz.identityprovider.api.bindings.SignedIdentityDocument.class);
                return new SignedIdentityDocument(
                        toEntityDocument(entity.identityDocument),
                        entity.signature,
                        entity.signingKeyVersion,
                        VespaUniqueInstanceId.fromDottedString(entity.providerUniqueId),
                        entity.dnsSuffix,
                        (AthenzService) AthenzIdentities.from(entity.providerService),
                        entity.ztsEndpoint,
                        entity.documentVersion);
            } else {
                // Non-success: surface status code and raw body to aid troubleshooting.
                throw new RuntimeException(
                        String.format(
                                "Failed to retrieve identity document for host %s: %d - %s",
                                host,
                                response.getStatusLine().getStatusCode(),
                                responseContent));
            }
        }
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
com.yahoo.vespa.athenz.identityprovider.api.bindings.SignedIdentityDocument entity =
/**
 * Fetches the signed identity document for the given host from the config server and maps it
 * from the wire (bindings) representation to the internal model type.
 *
 * @param host hostname the document is requested for (appended as the last path segment)
 * @param type document type path segment, e.g. "node" or "tenant"
 * @return the deserialized and mapped {@code SignedIdentityDocument}
 * @throws RuntimeException on a non-2xx response, with status code and response body included
 * @throws UncheckedIOException on I/O failure while talking to the config server
 */
private SignedIdentityDocument getIdentityDocument(String host, String type) {
    // A fresh client per call; both client and response are closed by try-with-resources.
    try (CloseableHttpClient client = createHttpClient(sslContextSupplier.get(), hostnameVerifier)) {
        URI uri = configserverUri
                .resolve(IDENTITY_DOCUMENT_API)
                .resolve(type + '/')  // trailing '/' so the following resolve appends rather than replaces
                .resolve(host);
        HttpUriRequest request = RequestBuilder.get()
                .setUri(uri)
                .addHeader("Connection", "close")
                .addHeader("Accept", "application/json")
                .build();
        try (CloseableHttpResponse response = client.execute(request)) {
            // Body is read up front so it is available for the error message as well.
            String responseContent = EntityUtils.toString(response.getEntity());
            if (HttpStatus.isSuccess(response.getStatusLine().getStatusCode())) {
                // Deserialize the bindings (wire) type, then convert to the internal model type below.
                com.yahoo.vespa.athenz.identityprovider.api.bindings.SignedIdentityDocument entity = objectMapper.readValue(
                        responseContent,
                        com.yahoo.vespa.athenz.identityprovider.api.bindings.SignedIdentityDocument.class);
                return new SignedIdentityDocument(
                        toEntityDocument(entity.identityDocument),
                        entity.signature,
                        entity.signingKeyVersion,
                        VespaUniqueInstanceId.fromDottedString(entity.providerUniqueId),
                        entity.dnsSuffix,
                        (AthenzService) AthenzIdentities.from(entity.providerService),
                        entity.ztsEndpoint,
                        entity.documentVersion);
            } else {
                // Non-success: surface status code and raw body to aid troubleshooting.
                throw new RuntimeException(
                        String.format(
                                "Failed to retrieve identity document for host %s: %d - %s",
                                host,
                                response.getStatusLine().getStatusCode(),
                                responseContent));
            }
        }
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/**
 * Default {@link IdentityDocumentClient}: retrieves signed identity documents from the
 * config server's identity-document API over TLS.
 */
class DefaultIdentityDocumentClient implements IdentityDocumentClient {

    private static final String IDENTITY_DOCUMENT_API = "/athenz/v1/provider/identity-document/";
    private static final ObjectMapper objectMapper = new ObjectMapper();

    // Supplier so the SSL context can be re-read per request when backed by an identity provider.
    private final Supplier<SSLContext> sslContextSupplier;
    private final HostnameVerifier hostnameVerifier;
    private final URI configserverUri;

    /** Creates a client with a fixed SSL context. */
    public DefaultIdentityDocumentClient(URI configserverUri,
                                         SSLContext sslContext,
                                         HostnameVerifier hostnameVerifier) {
        this.configserverUri = configserverUri;
        this.sslContextSupplier = () -> sslContext;
        this.hostnameVerifier = hostnameVerifier;
    }

    /** Creates a client that obtains the SSL context from the given identity provider on each use. */
    public DefaultIdentityDocumentClient(URI configserverUri,
                                         ServiceIdentityProvider identityProvider,
                                         HostnameVerifier hostnameVerifier) {
        this.configserverUri = configserverUri;
        this.sslContextSupplier = identityProvider::getIdentitySslContext;
        this.hostnameVerifier = hostnameVerifier;
    }

    @Override
    public SignedIdentityDocument getNodeIdentityDocument(String host) {
        return getIdentityDocument(host, "node");
    }

    @Override
    public SignedIdentityDocument getTenantIdentityDocument(String host) {
        return getIdentityDocument(host, "tenant");
    }

    // Converts the wire (bindings) identity document to the internal model type.
    private static IdentityDocument toEntityDocument(
            com.yahoo.vespa.athenz.identityprovider.api.bindings.IdentityDocument identityDocument) {
        return new IdentityDocument(
                identityDocument.providerUniqueId.toVespaUniqueInstanceId(),
                identityDocument.configServerHostname,
                identityDocument.instanceHostname,
                identityDocument.createdAt,
                identityDocument.ipAddresses);
    }

    // Builds an HTTP client with up to 3 retries (also for requests already sent) and the given TLS setup.
    private static CloseableHttpClient createHttpClient(SSLContext sslContext,
                                                        HostnameVerifier hostnameVerifier) {
        return HttpClientBuilder.create()
                .setRetryHandler(new DefaultHttpRequestRetryHandler(3, /*requestSentRetryEnabled*/true))
                .setSSLContext(sslContext)
                .setSSLHostnameVerifier(hostnameVerifier)
                .setUserAgent("default-identity-document-client")
                .build();
    }
}
/**
 * Default {@link IdentityDocumentClient}: retrieves signed identity documents from the
 * config server's identity-document API over TLS.
 */
class DefaultIdentityDocumentClient implements IdentityDocumentClient {

    private static final String IDENTITY_DOCUMENT_API = "/athenz/v1/provider/identity-document/";
    private static final ObjectMapper objectMapper = new ObjectMapper();

    // Supplier so the SSL context can be re-read per request when backed by an identity provider.
    private final Supplier<SSLContext> sslContextSupplier;
    private final HostnameVerifier hostnameVerifier;
    private final URI configserverUri;

    /** Creates a client with a fixed SSL context. */
    public DefaultIdentityDocumentClient(URI configserverUri,
                                         SSLContext sslContext,
                                         HostnameVerifier hostnameVerifier) {
        this.configserverUri = configserverUri;
        this.sslContextSupplier = () -> sslContext;
        this.hostnameVerifier = hostnameVerifier;
    }

    /** Creates a client that obtains the SSL context from the given identity provider on each use. */
    public DefaultIdentityDocumentClient(URI configserverUri,
                                         ServiceIdentityProvider identityProvider,
                                         HostnameVerifier hostnameVerifier) {
        this.configserverUri = configserverUri;
        this.sslContextSupplier = identityProvider::getIdentitySslContext;
        this.hostnameVerifier = hostnameVerifier;
    }

    @Override
    public SignedIdentityDocument getNodeIdentityDocument(String host) {
        return getIdentityDocument(host, "node");
    }

    @Override
    public SignedIdentityDocument getTenantIdentityDocument(String host) {
        return getIdentityDocument(host, "tenant");
    }

    // Converts the wire (bindings) identity document to the internal model type.
    private static IdentityDocument toEntityDocument(
            com.yahoo.vespa.athenz.identityprovider.api.bindings.IdentityDocument identityDocument) {
        return new IdentityDocument(
                identityDocument.providerUniqueId.toVespaUniqueInstanceId(),
                identityDocument.configServerHostname,
                identityDocument.instanceHostname,
                identityDocument.createdAt,
                identityDocument.ipAddresses);
    }

    // Builds an HTTP client with up to 3 retries (also for requests already sent) and the given TLS setup.
    private static CloseableHttpClient createHttpClient(SSLContext sslContext,
                                                        HostnameVerifier hostnameVerifier) {
        return HttpClientBuilder.create()
                .setRetryHandler(new DefaultHttpRequestRetryHandler(3, /*requestSentRetryEnabled*/true))
                .setSSLContext(sslContext)
                .setSSLHostnameVerifier(hostnameVerifier)
                .setUserAgent("default-identity-document-client")
                .build();
    }
}
_Signed identity document_, _identity document_ and _instance_ are all concepts from Athenz. The concept _identity document_ is used multiple places in the Athenz identity provider spec and our implementation.
/**
 * Fetches the signed identity document for the given host from the config server and maps it
 * from the wire (bindings) representation to the internal model type.
 *
 * @param host hostname the document is requested for (appended as the last path segment)
 * @param type document type path segment, e.g. "node" or "tenant"
 * @return the deserialized and mapped {@code SignedIdentityDocument}
 * @throws RuntimeException on a non-2xx response, with status code and response body included
 * @throws UncheckedIOException on I/O failure while talking to the config server
 */
private SignedIdentityDocument getIdentityDocument(String host, String type) {
    // A fresh client per call; both client and response are closed by try-with-resources.
    try (CloseableHttpClient client = createHttpClient(sslContextSupplier.get(), hostnameVerifier)) {
        URI uri = configserverUri
                .resolve(IDENTITY_DOCUMENT_API)
                .resolve(type + '/')  // trailing '/' so the following resolve appends rather than replaces
                .resolve(host);
        HttpUriRequest request = RequestBuilder.get()
                .setUri(uri)
                .addHeader("Connection", "close")
                .addHeader("Accept", "application/json")
                .build();
        try (CloseableHttpResponse response = client.execute(request)) {
            // Body is read up front so it is available for the error message as well.
            String responseContent = EntityUtils.toString(response.getEntity());
            if (HttpStatus.isSuccess(response.getStatusLine().getStatusCode())) {
                // Deserialize the bindings (wire) type, then convert to the internal model type below.
                com.yahoo.vespa.athenz.identityprovider.api.bindings.SignedIdentityDocument entity = objectMapper.readValue(
                        responseContent,
                        com.yahoo.vespa.athenz.identityprovider.api.bindings.SignedIdentityDocument.class);
                return new SignedIdentityDocument(
                        toEntityDocument(entity.identityDocument),
                        entity.signature,
                        entity.signingKeyVersion,
                        VespaUniqueInstanceId.fromDottedString(entity.providerUniqueId),
                        entity.dnsSuffix,
                        (AthenzService) AthenzIdentities.from(entity.providerService),
                        entity.ztsEndpoint,
                        entity.documentVersion);
            } else {
                // Non-success: surface status code and raw body to aid troubleshooting.
                throw new RuntimeException(
                        String.format(
                                "Failed to retrieve identity document for host %s: %d - %s",
                                host,
                                response.getStatusLine().getStatusCode(),
                                responseContent));
            }
        }
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
com.yahoo.vespa.athenz.identityprovider.api.bindings.SignedIdentityDocument entity =
/**
 * Fetches the signed identity document for the given host from the config server and maps it
 * from the wire (bindings) representation to the internal model type.
 *
 * @param host hostname the document is requested for (appended as the last path segment)
 * @param type document type path segment, e.g. "node" or "tenant"
 * @return the deserialized and mapped {@code SignedIdentityDocument}
 * @throws RuntimeException on a non-2xx response, with status code and response body included
 * @throws UncheckedIOException on I/O failure while talking to the config server
 */
private SignedIdentityDocument getIdentityDocument(String host, String type) {
    // A fresh client per call; both client and response are closed by try-with-resources.
    try (CloseableHttpClient client = createHttpClient(sslContextSupplier.get(), hostnameVerifier)) {
        URI uri = configserverUri
                .resolve(IDENTITY_DOCUMENT_API)
                .resolve(type + '/')  // trailing '/' so the following resolve appends rather than replaces
                .resolve(host);
        HttpUriRequest request = RequestBuilder.get()
                .setUri(uri)
                .addHeader("Connection", "close")
                .addHeader("Accept", "application/json")
                .build();
        try (CloseableHttpResponse response = client.execute(request)) {
            // Body is read up front so it is available for the error message as well.
            String responseContent = EntityUtils.toString(response.getEntity());
            if (HttpStatus.isSuccess(response.getStatusLine().getStatusCode())) {
                // Deserialize the bindings (wire) type, then convert to the internal model type below.
                com.yahoo.vespa.athenz.identityprovider.api.bindings.SignedIdentityDocument entity = objectMapper.readValue(
                        responseContent,
                        com.yahoo.vespa.athenz.identityprovider.api.bindings.SignedIdentityDocument.class);
                return new SignedIdentityDocument(
                        toEntityDocument(entity.identityDocument),
                        entity.signature,
                        entity.signingKeyVersion,
                        VespaUniqueInstanceId.fromDottedString(entity.providerUniqueId),
                        entity.dnsSuffix,
                        (AthenzService) AthenzIdentities.from(entity.providerService),
                        entity.ztsEndpoint,
                        entity.documentVersion);
            } else {
                // Non-success: surface status code and raw body to aid troubleshooting.
                throw new RuntimeException(
                        String.format(
                                "Failed to retrieve identity document for host %s: %d - %s",
                                host,
                                response.getStatusLine().getStatusCode(),
                                responseContent));
            }
        }
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
/**
 * Default {@link IdentityDocumentClient}: retrieves signed identity documents from the
 * config server's identity-document API over TLS.
 */
class DefaultIdentityDocumentClient implements IdentityDocumentClient {

    private static final String IDENTITY_DOCUMENT_API = "/athenz/v1/provider/identity-document/";
    private static final ObjectMapper objectMapper = new ObjectMapper();

    // Supplier so the SSL context can be re-read per request when backed by an identity provider.
    private final Supplier<SSLContext> sslContextSupplier;
    private final HostnameVerifier hostnameVerifier;
    private final URI configserverUri;

    /** Creates a client with a fixed SSL context. */
    public DefaultIdentityDocumentClient(URI configserverUri,
                                         SSLContext sslContext,
                                         HostnameVerifier hostnameVerifier) {
        this.configserverUri = configserverUri;
        this.sslContextSupplier = () -> sslContext;
        this.hostnameVerifier = hostnameVerifier;
    }

    /** Creates a client that obtains the SSL context from the given identity provider on each use. */
    public DefaultIdentityDocumentClient(URI configserverUri,
                                         ServiceIdentityProvider identityProvider,
                                         HostnameVerifier hostnameVerifier) {
        this.configserverUri = configserverUri;
        this.sslContextSupplier = identityProvider::getIdentitySslContext;
        this.hostnameVerifier = hostnameVerifier;
    }

    @Override
    public SignedIdentityDocument getNodeIdentityDocument(String host) {
        return getIdentityDocument(host, "node");
    }

    @Override
    public SignedIdentityDocument getTenantIdentityDocument(String host) {
        return getIdentityDocument(host, "tenant");
    }

    // Converts the wire (bindings) identity document to the internal model type.
    private static IdentityDocument toEntityDocument(
            com.yahoo.vespa.athenz.identityprovider.api.bindings.IdentityDocument identityDocument) {
        return new IdentityDocument(
                identityDocument.providerUniqueId.toVespaUniqueInstanceId(),
                identityDocument.configServerHostname,
                identityDocument.instanceHostname,
                identityDocument.createdAt,
                identityDocument.ipAddresses);
    }

    // Builds an HTTP client with up to 3 retries (also for requests already sent) and the given TLS setup.
    private static CloseableHttpClient createHttpClient(SSLContext sslContext,
                                                        HostnameVerifier hostnameVerifier) {
        return HttpClientBuilder.create()
                .setRetryHandler(new DefaultHttpRequestRetryHandler(3, /*requestSentRetryEnabled*/true))
                .setSSLContext(sslContext)
                .setSSLHostnameVerifier(hostnameVerifier)
                .setUserAgent("default-identity-document-client")
                .build();
    }
}
/**
 * Default {@link IdentityDocumentClient}: retrieves signed identity documents from the
 * config server's identity-document API over TLS.
 */
class DefaultIdentityDocumentClient implements IdentityDocumentClient {

    private static final String IDENTITY_DOCUMENT_API = "/athenz/v1/provider/identity-document/";
    private static final ObjectMapper objectMapper = new ObjectMapper();

    // Supplier so the SSL context can be re-read per request when backed by an identity provider.
    private final Supplier<SSLContext> sslContextSupplier;
    private final HostnameVerifier hostnameVerifier;
    private final URI configserverUri;

    /** Creates a client with a fixed SSL context. */
    public DefaultIdentityDocumentClient(URI configserverUri,
                                         SSLContext sslContext,
                                         HostnameVerifier hostnameVerifier) {
        this.configserverUri = configserverUri;
        this.sslContextSupplier = () -> sslContext;
        this.hostnameVerifier = hostnameVerifier;
    }

    /** Creates a client that obtains the SSL context from the given identity provider on each use. */
    public DefaultIdentityDocumentClient(URI configserverUri,
                                         ServiceIdentityProvider identityProvider,
                                         HostnameVerifier hostnameVerifier) {
        this.configserverUri = configserverUri;
        this.sslContextSupplier = identityProvider::getIdentitySslContext;
        this.hostnameVerifier = hostnameVerifier;
    }

    @Override
    public SignedIdentityDocument getNodeIdentityDocument(String host) {
        return getIdentityDocument(host, "node");
    }

    @Override
    public SignedIdentityDocument getTenantIdentityDocument(String host) {
        return getIdentityDocument(host, "tenant");
    }

    // Converts the wire (bindings) identity document to the internal model type.
    private static IdentityDocument toEntityDocument(
            com.yahoo.vespa.athenz.identityprovider.api.bindings.IdentityDocument identityDocument) {
        return new IdentityDocument(
                identityDocument.providerUniqueId.toVespaUniqueInstanceId(),
                identityDocument.configServerHostname,
                identityDocument.instanceHostname,
                identityDocument.createdAt,
                identityDocument.ipAddresses);
    }

    // Builds an HTTP client with up to 3 retries (also for requests already sent) and the given TLS setup.
    private static CloseableHttpClient createHttpClient(SSLContext sslContext,
                                                        HostnameVerifier hostnameVerifier) {
        return HttpClientBuilder.create()
                .setRetryHandler(new DefaultHttpRequestRetryHandler(3, /*requestSentRetryEnabled*/true))
                .setSSLContext(sslContext)
                .setSSLHostnameVerifier(hostnameVerifier)
                .setUserAgent("default-identity-document-client")
                .build();
    }
}
Should this be redundancy.searchableCopies() instead?
/**
 * Applies the given redundancy settings to this cluster and remembers them.
 *
 * For an indexed cluster with hierarchic distribution, the allowed number of down nodes per
 * fixed row is derived from the effective final redundancy divided across the groups.
 *
 * @param redundancy the redundancy settings to apply
 */
public void handleRedundancy(Redundancy redundancy) {
    if (hasIndexedCluster()) {
        if (usesHierarchicDistribution()) {
            indexedCluster.setMaxNodesDownPerFixedRow((redundancy.effectiveFinalRedundancy() / groupToSpecMap.size()) - 1);
        }
        // Bug fix: searchable copies must come from searchableCopies(), not readyCopies()
        // (the two settings are distinct on Redundancy).
        indexedCluster.setSearchableCopies(redundancy.searchableCopies());
    }
    this.redundancy = redundancy;
}
indexedCluster.setSearchableCopies(redundancy.readyCopies());
/**
 * Applies the given redundancy settings to this cluster and remembers them.
 *
 * @param redundancy the redundancy settings to apply
 */
public void handleRedundancy(Redundancy redundancy) {
    if (hasIndexedCluster()) {
        if (usesHierarchicDistribution()) {
            // Divide the effective final redundancy evenly across the configured groups.
            int groupCount = groupToSpecMap.size();
            int maxNodesDown = redundancy.effectiveFinalRedundancy() / groupCount - 1;
            indexedCluster.setMaxNodesDownPerFixedRow(maxNodesDown);
        }
        indexedCluster.setSearchableCopies(redundancy.searchableCopies());
    }
    this.redundancy = redundancy;
}
/**
 * XML builder that constructs a {@link ContentSearchCluster} from the content cluster element,
 * including its streaming search clusters and (at most one) indexed search cluster.
 */
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<ContentSearchCluster> {

    // All document type definitions available to the cluster, keyed by type name.
    private final Map<String, NewDocumentType> documentDefinitions;
    // Document types configured with global distribution.
    private final Set<NewDocumentType> globallyDistributedDocuments;

    public Builder(Map<String, NewDocumentType> documentDefinitions,
                   Set<NewDocumentType> globallyDistributedDocuments) {
        this.documentDefinitions = documentDefinitions;
        this.globallyDistributedDocuments = globallyDistributedDocuments;
    }

    @Override
    protected ContentSearchCluster doBuild(AbstractConfigProducer ancestor, Element producerSpec) {
        ModelElement clusterElem = new ModelElement(producerSpec);
        String clusterName = ContentCluster.getClusterName(clusterElem);
        Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown");
        ContentSearchCluster search = new ContentSearchCluster(ancestor, clusterName,
                documentDefinitions, globallyDistributedDocuments,
                getFlushOnShutdown(flushOnShutdownElem, AbstractConfigProducer.deployStateFrom(ancestor)));
        // Optional tuning and resource-limit configuration under engine.proton.
        ModelElement tuning = clusterElem.getChildByPath("engine.proton.tuning");
        if (tuning != null) {
            search.setTuning(new DomSearchTuningBuilder().build(search, tuning.getXml()));
        }
        ModelElement protonElem = clusterElem.getChildByPath("engine.proton");
        if (protonElem != null) {
            search.setResourceLimits(DomResourceLimitsBuilder.build(protonElem));
        }
        buildAllStreamingSearchClusters(clusterElem, clusterName, search);
        buildIndexedSearchCluster(clusterElem, clusterName, search);
        return search;
    }

    // Explicit setting wins; otherwise flush-on-shutdown is off when hosted, on otherwise.
    private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) {
        if (flushOnShutdownElem != null) {
            return flushOnShutdownElem;
        }
        return (stateIsHosted(deployState) ? false : true);
    }

    private Double getQueryTimeout(ModelElement clusterElem) {
        return clusterElem.childAsDouble("engine.proton.query-timeout");
    }

    // Creates one streaming search cluster per document declared with mode="streaming".
    private void buildAllStreamingSearchClusters(ModelElement clusterElem, String clusterName, ContentSearchCluster search) {
        ModelElement docElem = clusterElem.getChild("documents");
        if (docElem == null) {
            return;
        }
        for (ModelElement docType : docElem.subElements("document")) {
            String mode = docType.getStringAttribute("mode");
            if ("streaming".equals(mode)) {
                buildStreamingSearchCluster(clusterElem, clusterName, search, docType);
            }
        }
    }

    private void buildStreamingSearchCluster(ModelElement clusterElem, String clusterName, ContentSearchCluster search, ModelElement docType) {
        String docTypeName = docType.getStringAttribute("type");
        // The streaming cluster is named "<clusterName>.<docTypeName>".
        StreamingSearchCluster cluster = new StreamingSearchCluster(search, clusterName + "." + docTypeName, 0, docTypeName, clusterName);
        search.addSearchCluster(cluster, getQueryTimeout(clusterElem), Arrays.asList(docType));
    }

    // Creates a single indexed search cluster covering all documents declared with mode="index", if any.
    private void buildIndexedSearchCluster(ModelElement clusterElem, String clusterName, ContentSearchCluster search) {
        List<ModelElement> indexedDefs = getIndexedSearchDefinitions(clusterElem);
        if (!indexedDefs.isEmpty()) {
            IndexedSearchCluster isc = new IndexedSearchCluster(search, clusterName, 0);
            isc.setRoutingSelector(clusterElem.childAsString("documents.selection"));
            Double visibilityDelay = clusterElem.childAsDouble("engine.proton.visibility-delay");
            if (visibilityDelay != null) {
                isc.setVisibilityDelay(visibilityDelay);
            }
            search.addSearchCluster(isc, getQueryTimeout(clusterElem), indexedDefs);
        }
    }

    // Collects the <document> elements declared with mode="index".
    private List<ModelElement> getIndexedSearchDefinitions(ModelElement clusterElem) {
        List<ModelElement> indexedDefs = new ArrayList<>();
        ModelElement docElem = clusterElem.getChild("documents");
        if (docElem == null) {
            return indexedDefs;
        }
        for (ModelElement docType : docElem.subElements("document")) {
            String mode = docType.getStringAttribute("mode");
            if ("index".equals(mode)) {
                indexedDefs.add(docType);
            }
        }
        return indexedDefs;
    }
}
/**
 * XML builder that constructs a {@link ContentSearchCluster} from the content cluster element,
 * including its streaming search clusters and (at most one) indexed search cluster.
 */
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<ContentSearchCluster> {

    // All document type definitions available to the cluster, keyed by type name.
    private final Map<String, NewDocumentType> documentDefinitions;
    // Document types configured with global distribution.
    private final Set<NewDocumentType> globallyDistributedDocuments;

    public Builder(Map<String, NewDocumentType> documentDefinitions,
                   Set<NewDocumentType> globallyDistributedDocuments) {
        this.documentDefinitions = documentDefinitions;
        this.globallyDistributedDocuments = globallyDistributedDocuments;
    }

    @Override
    protected ContentSearchCluster doBuild(AbstractConfigProducer ancestor, Element producerSpec) {
        ModelElement clusterElem = new ModelElement(producerSpec);
        String clusterName = ContentCluster.getClusterName(clusterElem);
        Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown");
        ContentSearchCluster search = new ContentSearchCluster(ancestor, clusterName,
                documentDefinitions, globallyDistributedDocuments,
                getFlushOnShutdown(flushOnShutdownElem, AbstractConfigProducer.deployStateFrom(ancestor)));
        // Optional tuning and resource-limit configuration under engine.proton.
        ModelElement tuning = clusterElem.getChildByPath("engine.proton.tuning");
        if (tuning != null) {
            search.setTuning(new DomSearchTuningBuilder().build(search, tuning.getXml()));
        }
        ModelElement protonElem = clusterElem.getChildByPath("engine.proton");
        if (protonElem != null) {
            search.setResourceLimits(DomResourceLimitsBuilder.build(protonElem));
        }
        buildAllStreamingSearchClusters(clusterElem, clusterName, search);
        buildIndexedSearchCluster(clusterElem, clusterName, search);
        return search;
    }

    // Explicit setting wins; otherwise flush-on-shutdown is off when hosted, on otherwise.
    private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) {
        if (flushOnShutdownElem != null) {
            return flushOnShutdownElem;
        }
        return (stateIsHosted(deployState) ? false : true);
    }

    private Double getQueryTimeout(ModelElement clusterElem) {
        return clusterElem.childAsDouble("engine.proton.query-timeout");
    }

    // Creates one streaming search cluster per document declared with mode="streaming".
    private void buildAllStreamingSearchClusters(ModelElement clusterElem, String clusterName, ContentSearchCluster search) {
        ModelElement docElem = clusterElem.getChild("documents");
        if (docElem == null) {
            return;
        }
        for (ModelElement docType : docElem.subElements("document")) {
            String mode = docType.getStringAttribute("mode");
            if ("streaming".equals(mode)) {
                buildStreamingSearchCluster(clusterElem, clusterName, search, docType);
            }
        }
    }

    private void buildStreamingSearchCluster(ModelElement clusterElem, String clusterName, ContentSearchCluster search, ModelElement docType) {
        String docTypeName = docType.getStringAttribute("type");
        // The streaming cluster is named "<clusterName>.<docTypeName>".
        StreamingSearchCluster cluster = new StreamingSearchCluster(search, clusterName + "." + docTypeName, 0, docTypeName, clusterName);
        search.addSearchCluster(cluster, getQueryTimeout(clusterElem), Arrays.asList(docType));
    }

    // Creates a single indexed search cluster covering all documents declared with mode="index", if any.
    private void buildIndexedSearchCluster(ModelElement clusterElem, String clusterName, ContentSearchCluster search) {
        List<ModelElement> indexedDefs = getIndexedSearchDefinitions(clusterElem);
        if (!indexedDefs.isEmpty()) {
            IndexedSearchCluster isc = new IndexedSearchCluster(search, clusterName, 0);
            isc.setRoutingSelector(clusterElem.childAsString("documents.selection"));
            Double visibilityDelay = clusterElem.childAsDouble("engine.proton.visibility-delay");
            if (visibilityDelay != null) {
                isc.setVisibilityDelay(visibilityDelay);
            }
            search.addSearchCluster(isc, getQueryTimeout(clusterElem), indexedDefs);
        }
    }

    // Collects the <document> elements declared with mode="index".
    private List<ModelElement> getIndexedSearchDefinitions(ModelElement clusterElem) {
        List<ModelElement> indexedDefs = new ArrayList<>();
        ModelElement docElem = clusterElem.getChild("documents");
        if (docElem == null) {
            return indexedDefs;
        }
        for (ModelElement docType : docElem.subElements("document")) {
            String mode = docType.getStringAttribute("mode");
            if ("index".equals(mode)) {
                indexedDefs.add(docType);
            }
        }
        return indexedDefs;
    }
}
Correct, fixed
/**
 * Applies the given redundancy settings to this cluster and remembers them.
 *
 * For an indexed cluster with hierarchic distribution, the allowed number of down nodes per
 * fixed row is derived from the effective final redundancy divided across the groups.
 *
 * @param redundancy the redundancy settings to apply
 */
public void handleRedundancy(Redundancy redundancy) {
    if (hasIndexedCluster()) {
        if (usesHierarchicDistribution()) {
            indexedCluster.setMaxNodesDownPerFixedRow((redundancy.effectiveFinalRedundancy() / groupToSpecMap.size()) - 1);
        }
        // Bug fix: searchable copies must come from searchableCopies(), not readyCopies()
        // (the two settings are distinct on Redundancy).
        indexedCluster.setSearchableCopies(redundancy.searchableCopies());
    }
    this.redundancy = redundancy;
}
indexedCluster.setSearchableCopies(redundancy.readyCopies());
/**
 * Applies the given redundancy settings to this cluster and remembers them.
 *
 * @param redundancy the redundancy settings to apply
 */
public void handleRedundancy(Redundancy redundancy) {
    if (hasIndexedCluster()) {
        if (usesHierarchicDistribution()) {
            // Divide the effective final redundancy evenly across the configured groups.
            int groupCount = groupToSpecMap.size();
            int maxNodesDown = redundancy.effectiveFinalRedundancy() / groupCount - 1;
            indexedCluster.setMaxNodesDownPerFixedRow(maxNodesDown);
        }
        indexedCluster.setSearchableCopies(redundancy.searchableCopies());
    }
    this.redundancy = redundancy;
}
/**
 * XML builder that constructs a {@link ContentSearchCluster} from the content cluster element,
 * including its streaming search clusters and (at most one) indexed search cluster.
 */
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<ContentSearchCluster> {

    // All document type definitions available to the cluster, keyed by type name.
    private final Map<String, NewDocumentType> documentDefinitions;
    // Document types configured with global distribution.
    private final Set<NewDocumentType> globallyDistributedDocuments;

    public Builder(Map<String, NewDocumentType> documentDefinitions,
                   Set<NewDocumentType> globallyDistributedDocuments) {
        this.documentDefinitions = documentDefinitions;
        this.globallyDistributedDocuments = globallyDistributedDocuments;
    }

    @Override
    protected ContentSearchCluster doBuild(AbstractConfigProducer ancestor, Element producerSpec) {
        ModelElement clusterElem = new ModelElement(producerSpec);
        String clusterName = ContentCluster.getClusterName(clusterElem);
        Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown");
        ContentSearchCluster search = new ContentSearchCluster(ancestor, clusterName,
                documentDefinitions, globallyDistributedDocuments,
                getFlushOnShutdown(flushOnShutdownElem, AbstractConfigProducer.deployStateFrom(ancestor)));
        // Optional tuning and resource-limit configuration under engine.proton.
        ModelElement tuning = clusterElem.getChildByPath("engine.proton.tuning");
        if (tuning != null) {
            search.setTuning(new DomSearchTuningBuilder().build(search, tuning.getXml()));
        }
        ModelElement protonElem = clusterElem.getChildByPath("engine.proton");
        if (protonElem != null) {
            search.setResourceLimits(DomResourceLimitsBuilder.build(protonElem));
        }
        buildAllStreamingSearchClusters(clusterElem, clusterName, search);
        buildIndexedSearchCluster(clusterElem, clusterName, search);
        return search;
    }

    // Explicit setting wins; otherwise flush-on-shutdown is off when hosted, on otherwise.
    private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) {
        if (flushOnShutdownElem != null) {
            return flushOnShutdownElem;
        }
        return (stateIsHosted(deployState) ? false : true);
    }

    private Double getQueryTimeout(ModelElement clusterElem) {
        return clusterElem.childAsDouble("engine.proton.query-timeout");
    }

    // Creates one streaming search cluster per document declared with mode="streaming".
    private void buildAllStreamingSearchClusters(ModelElement clusterElem, String clusterName, ContentSearchCluster search) {
        ModelElement docElem = clusterElem.getChild("documents");
        if (docElem == null) {
            return;
        }
        for (ModelElement docType : docElem.subElements("document")) {
            String mode = docType.getStringAttribute("mode");
            if ("streaming".equals(mode)) {
                buildStreamingSearchCluster(clusterElem, clusterName, search, docType);
            }
        }
    }

    private void buildStreamingSearchCluster(ModelElement clusterElem, String clusterName, ContentSearchCluster search, ModelElement docType) {
        String docTypeName = docType.getStringAttribute("type");
        // The streaming cluster is named "<clusterName>.<docTypeName>".
        StreamingSearchCluster cluster = new StreamingSearchCluster(search, clusterName + "." + docTypeName, 0, docTypeName, clusterName);
        search.addSearchCluster(cluster, getQueryTimeout(clusterElem), Arrays.asList(docType));
    }

    // Creates a single indexed search cluster covering all documents declared with mode="index", if any.
    private void buildIndexedSearchCluster(ModelElement clusterElem, String clusterName, ContentSearchCluster search) {
        List<ModelElement> indexedDefs = getIndexedSearchDefinitions(clusterElem);
        if (!indexedDefs.isEmpty()) {
            IndexedSearchCluster isc = new IndexedSearchCluster(search, clusterName, 0);
            isc.setRoutingSelector(clusterElem.childAsString("documents.selection"));
            Double visibilityDelay = clusterElem.childAsDouble("engine.proton.visibility-delay");
            if (visibilityDelay != null) {
                isc.setVisibilityDelay(visibilityDelay);
            }
            search.addSearchCluster(isc, getQueryTimeout(clusterElem), indexedDefs);
        }
    }

    // Collects the <document> elements declared with mode="index".
    private List<ModelElement> getIndexedSearchDefinitions(ModelElement clusterElem) {
        List<ModelElement> indexedDefs = new ArrayList<>();
        ModelElement docElem = clusterElem.getChild("documents");
        if (docElem == null) {
            return indexedDefs;
        }
        for (ModelElement docType : docElem.subElements("document")) {
            String mode = docType.getStringAttribute("mode");
            if ("index".equals(mode)) {
                indexedDefs.add(docType);
            }
        }
        return indexedDefs;
    }
}
class Builder extends VespaDomBuilder.DomConfigProducerBuilder<ContentSearchCluster> { private final Map<String, NewDocumentType> documentDefinitions; private final Set<NewDocumentType> globallyDistributedDocuments; public Builder(Map<String, NewDocumentType> documentDefinitions, Set<NewDocumentType> globallyDistributedDocuments) { this.documentDefinitions = documentDefinitions; this.globallyDistributedDocuments = globallyDistributedDocuments; } @Override protected ContentSearchCluster doBuild(AbstractConfigProducer ancestor, Element producerSpec) { ModelElement clusterElem = new ModelElement(producerSpec); String clusterName = ContentCluster.getClusterName(clusterElem); Boolean flushOnShutdownElem = clusterElem.childAsBoolean("engine.proton.flush-on-shutdown"); ContentSearchCluster search = new ContentSearchCluster(ancestor, clusterName, documentDefinitions, globallyDistributedDocuments, getFlushOnShutdown(flushOnShutdownElem, AbstractConfigProducer.deployStateFrom(ancestor))); ModelElement tuning = clusterElem.getChildByPath("engine.proton.tuning"); if (tuning != null) { search.setTuning(new DomSearchTuningBuilder().build(search, tuning.getXml())); } ModelElement protonElem = clusterElem.getChildByPath("engine.proton"); if (protonElem != null) { search.setResourceLimits(DomResourceLimitsBuilder.build(protonElem)); } buildAllStreamingSearchClusters(clusterElem, clusterName, search); buildIndexedSearchCluster(clusterElem, clusterName, search); return search; } private boolean getFlushOnShutdown(Boolean flushOnShutdownElem, DeployState deployState) { if (flushOnShutdownElem != null) { return flushOnShutdownElem; } return (stateIsHosted(deployState) ? 
false : true); } private Double getQueryTimeout(ModelElement clusterElem) { return clusterElem.childAsDouble("engine.proton.query-timeout"); } private void buildAllStreamingSearchClusters(ModelElement clusterElem, String clusterName, ContentSearchCluster search) { ModelElement docElem = clusterElem.getChild("documents"); if (docElem == null) { return; } for (ModelElement docType : docElem.subElements("document")) { String mode = docType.getStringAttribute("mode"); if ("streaming".equals(mode)) { buildStreamingSearchCluster(clusterElem, clusterName, search, docType); } } } private void buildStreamingSearchCluster(ModelElement clusterElem, String clusterName, ContentSearchCluster search, ModelElement docType) { String docTypeName = docType.getStringAttribute("type"); StreamingSearchCluster cluster = new StreamingSearchCluster(search, clusterName + "." + docTypeName, 0, docTypeName, clusterName); search.addSearchCluster(cluster, getQueryTimeout(clusterElem), Arrays.asList(docType)); } private void buildIndexedSearchCluster(ModelElement clusterElem, String clusterName, ContentSearchCluster search) { List<ModelElement> indexedDefs = getIndexedSearchDefinitions(clusterElem); if (!indexedDefs.isEmpty()) { IndexedSearchCluster isc = new IndexedSearchCluster(search, clusterName, 0); isc.setRoutingSelector(clusterElem.childAsString("documents.selection")); Double visibilityDelay = clusterElem.childAsDouble("engine.proton.visibility-delay"); if (visibilityDelay != null) { isc.setVisibilityDelay(visibilityDelay); } search.addSearchCluster(isc, getQueryTimeout(clusterElem), indexedDefs); } } private List<ModelElement> getIndexedSearchDefinitions(ModelElement clusterElem) { List<ModelElement> indexedDefs = new ArrayList<>(); ModelElement docElem = clusterElem.getChild("documents"); if (docElem == null) { return indexedDefs; } for (ModelElement docType : docElem.subElements("document")) { String mode = docType.getStringAttribute("mode"); if ("index".equals(mode)) { 
indexedDefs.add(docType); } } return indexedDefs; } }
Sufficient to just call `tester.deploy(oldModel, getServices(newGlobal), validationOverrides);` without capturing named return value?
private void testChangeGlobalAttribute(boolean allowed, boolean oldGlobal, boolean newGlobal, String validationOverrides) { ValidationTester tester = new ValidationTester(); VespaModel oldModel = tester.deploy(null, getServices(oldGlobal), validationOverrides).getFirst(); try { List<ConfigChangeAction> changeActions = tester.deploy(oldModel, getServices(newGlobal), validationOverrides).getSecond(); assertTrue(allowed); } catch (IllegalStateException e) { assertFalse(allowed); assertEquals("Document type music in cluster default changed global from " + oldGlobal + " to " + newGlobal, e.getMessage()); } }
tester.deploy(oldModel, getServices(newGlobal), validationOverrides).getSecond();
private void testChangeGlobalAttribute(boolean allowed, boolean oldGlobal, boolean newGlobal, String validationOverrides) { ValidationTester tester = new ValidationTester(); VespaModel oldModel = tester.deploy(null, getServices(oldGlobal), validationOverrides).getFirst(); try { tester.deploy(oldModel, getServices(newGlobal), validationOverrides).getSecond(); assertTrue(allowed); } catch (IllegalStateException e) { assertFalse(allowed); assertEquals("Document type music in cluster default changed global from " + oldGlobal + " to " + newGlobal, e.getMessage()); } }
class GlobalDocumentChangeValidatorTest { @Test public void testChangGlobalAttribute() throws IOException, SAXException { testChangeGlobalAttribute(true, false, false, null); testChangeGlobalAttribute(true, true, true, null); testChangeGlobalAttribute(false, false, true, null); testChangeGlobalAttribute(false, true, false, null); testChangeGlobalAttribute(true, false, true, globalDocumentValidationOverrides); testChangeGlobalAttribute(true, true, false, globalDocumentValidationOverrides); } private static final String getServices(boolean isGlobal) { return "<services version='1.0'>" + " <content id='default' version='1.0'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='music' mode='index' global='" + isGlobal + "'/>" + " </documents>" + " <nodes count='1'/>" + " </content>" + "</services>"; } private static final String globalDocumentValidationOverrides = "<validation-overrides>\n" + " <allow until='2000-01-14' comment='test override'>global-document-change</allow>\n" + "</validation-overrides>\n"; }
class GlobalDocumentChangeValidatorTest { @Test public void testChangGlobalAttribute() throws IOException, SAXException { testChangeGlobalAttribute(true, false, false, null); testChangeGlobalAttribute(true, true, true, null); testChangeGlobalAttribute(false, false, true, null); testChangeGlobalAttribute(false, true, false, null); testChangeGlobalAttribute(true, false, true, globalDocumentValidationOverrides); testChangeGlobalAttribute(true, true, false, globalDocumentValidationOverrides); } private static final String getServices(boolean isGlobal) { return "<services version='1.0'>" + " <content id='default' version='1.0'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document type='music' mode='index' global='" + isGlobal + "'/>" + " </documents>" + " <nodes count='1'/>" + " </content>" + "</services>"; } private static final String globalDocumentValidationOverrides = "<validation-overrides>\n" + " <allow until='2000-01-14' comment='test override'>global-document-change</allow>\n" + "</validation-overrides>\n"; }
You are leaking the connection pool of the old client here as you are not explicitly closing it. Probably not a big deal since the ssl context is not updated often though.
private void setClient(SSLContext sslContext, AthenzService identity) { this.client = createClient(sslContext, identity); }
this.client = createClient(sslContext, identity);
private void setClient(SSLContext sslContext, AthenzService identity) { this.client = createClient(sslContext, identity); }
class ConfigServerApiImpl implements ConfigServerApi { private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(ConfigServerApiImpl.class); private final ObjectMapper mapper = new ObjectMapper(); private final List<URI> configServers; private Runnable runOnClose = () -> {}; /** * The 'client' may be periodically re-created through calls to setSSLConnectionSocketFactory. * * The 'client' reference must be volatile because it is set and read in different threads, and visibility * of changes is only guaranteed for volatile variables. */ private volatile SelfCloseableHttpClient client; /** * Creates an api for talking to the config servers with a fixed socket factory. * * <p>This may be used to avoid requiring background certificate signing requests (CSR) * against the config server when client validation is enabled in the config server. */ public static ConfigServerApiImpl createWithSocketFactory( List<URI> configServerHosts, SSLConnectionSocketFactory socketFactory) { return new ConfigServerApiImpl(configServerHosts, new SelfCloseableHttpClient(socketFactory)); } public static ConfigServerApiImpl create(ConfigServerInfo configServerInfo, SiaIdentityProvider identityProvider) { return new ConfigServerApiImpl(configServerInfo.getConfigServerUris(), identityProvider); } public static ConfigServerApiImpl createFor(ConfigServerInfo configServerInfo, SiaIdentityProvider identityProvider, HostName configServer) { URI uri = configServerInfo.getConfigServerUri(configServer.value()); return new ConfigServerApiImpl(Collections.singletonList(uri), identityProvider); } static ConfigServerApiImpl createForTestingWithClient(List<URI> configServerHosts, SelfCloseableHttpClient client) { return new ConfigServerApiImpl(configServerHosts, client); } private ConfigServerApiImpl(Collection<URI> configServers, SiaIdentityProvider identityProvider) { this(configServers, createClient(identityProvider)); ServiceIdentityProvider.Listener listener = 
this::setClient; identityProvider.addIdentityListener(listener); this.runOnClose = () -> identityProvider.removeIdentityListener(listener); } private ConfigServerApiImpl(Collection<URI> configServers, SelfCloseableHttpClient client) { this.configServers = randomizeConfigServerUris(configServers); this.client = client; } interface CreateRequest { HttpUriRequest createRequest(URI configServerUri) throws JsonProcessingException, UnsupportedEncodingException; } private <T> T tryAllConfigServers(CreateRequest requestFactory, Class<T> wantedReturnType) { Exception lastException = null; for (URI configServer : configServers) { final CloseableHttpResponse response; try { response = client.execute(requestFactory.createRequest(configServer)); } catch (Exception e) { if (e.getMessage().indexOf("(Connection refused)") > 0) { NODE_ADMIN_LOGGER.info("Connection refused to " + configServer + " (upgrading?), will try next"); } else { NODE_ADMIN_LOGGER.warning("Failed to communicate with " + configServer + ", will try next: " + e.getMessage()); } lastException = e; continue; } try { Optional<HttpException> retryableException = HttpException.handleStatusCode( response.getStatusLine().getStatusCode(), "Config server " + configServer); if (retryableException.isPresent()) { lastException = retryableException.get(); continue; } try { return mapper.readValue(response.getEntity().getContent(), wantedReturnType); } catch (IOException e) { throw new RuntimeException("Response didn't contain nodes element, failed parsing?", e); } } finally { try { response.close(); } catch (IOException e) { NODE_ADMIN_LOGGER.warning("Ignoring exception from closing response", e); } } } throw new RuntimeException("All requests against the config servers (" + configServers + ") failed, last as follows:", lastException); } @Override public <T> T put(String path, Optional<Object> bodyJsonPojo, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> { HttpPut put = new 
HttpPut(configServer.resolve(path)); setContentTypeToApplicationJson(put); if (bodyJsonPojo.isPresent()) { put.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo.get()))); } return put; }, wantedReturnType); } @Override public <T> T patch(String path, Object bodyJsonPojo, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> { HttpPatch patch = new HttpPatch(configServer.resolve(path)); setContentTypeToApplicationJson(patch); patch.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo))); return patch; }, wantedReturnType); } @Override public <T> T delete(String path, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> new HttpDelete(configServer.resolve(path)), wantedReturnType); } @Override public <T> T get(String path, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> new HttpGet(configServer.resolve(path)), wantedReturnType); } @Override public <T> T post(String path, Object bodyJsonPojo, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> { HttpPost post = new HttpPost(configServer.resolve(path)); setContentTypeToApplicationJson(post); post.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo))); return post; }, wantedReturnType); } @Override public void close() { runOnClose.run(); client.close(); } private void setContentTypeToApplicationJson(HttpRequestBase request) { request.setHeader(HttpHeaders.CONTENT_TYPE, "application/json"); } private static SelfCloseableHttpClient createClient(SSLContext sslContext, AthenzService identity) { AthenzIdentityVerifier identityVerifier = new AthenzIdentityVerifier(singleton(identity)); SSLConnectionSocketFactory socketFactory = new SSLConnectionSocketFactory(sslContext, identityVerifier); return new SelfCloseableHttpClient(socketFactory); } private static SelfCloseableHttpClient createClient(SiaIdentityProvider identityProvider) { return createClient(identityProvider.getIdentitySslContext(), 
identityProvider.identity()); } private static List<URI> randomizeConfigServerUris(Collection<URI> configServerUris) { List<URI> shuffledConfigServerHosts = new ArrayList<>(configServerUris); Collections.shuffle(shuffledConfigServerHosts); return shuffledConfigServerHosts; } }
class ConfigServerApiImpl implements ConfigServerApi { private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(ConfigServerApiImpl.class); private final ObjectMapper mapper = new ObjectMapper(); private final List<URI> configServers; private Runnable runOnClose = () -> {}; /** * The 'client' may be periodically re-created through calls to setSSLConnectionSocketFactory. * * The 'client' reference must be volatile because it is set and read in different threads, and visibility * of changes is only guaranteed for volatile variables. */ private volatile SelfCloseableHttpClient client; /** * Creates an api for talking to the config servers with a fixed socket factory. * * <p>This may be used to avoid requiring background certificate signing requests (CSR) * against the config server when client validation is enabled in the config server. */ public static ConfigServerApiImpl createWithSocketFactory( List<URI> configServerHosts, SSLConnectionSocketFactory socketFactory) { return new ConfigServerApiImpl(configServerHosts, new SelfCloseableHttpClient(socketFactory)); } public static ConfigServerApiImpl create(ConfigServerInfo configServerInfo, SiaIdentityProvider identityProvider) { return new ConfigServerApiImpl(configServerInfo.getConfigServerUris(), identityProvider); } public static ConfigServerApiImpl createFor(ConfigServerInfo configServerInfo, SiaIdentityProvider identityProvider, HostName configServer) { URI uri = configServerInfo.getConfigServerUri(configServer.value()); return new ConfigServerApiImpl(Collections.singletonList(uri), identityProvider); } static ConfigServerApiImpl createForTestingWithClient(List<URI> configServerHosts, SelfCloseableHttpClient client) { return new ConfigServerApiImpl(configServerHosts, client); } private ConfigServerApiImpl(Collection<URI> configServers, SiaIdentityProvider identityProvider) { this(configServers, createClient(identityProvider)); ServiceIdentityProvider.Listener listener = 
this::setClient; identityProvider.addIdentityListener(listener); this.runOnClose = () -> identityProvider.removeIdentityListener(listener); } private ConfigServerApiImpl(Collection<URI> configServers, SelfCloseableHttpClient client) { this.configServers = randomizeConfigServerUris(configServers); this.client = client; } interface CreateRequest { HttpUriRequest createRequest(URI configServerUri) throws JsonProcessingException, UnsupportedEncodingException; } private <T> T tryAllConfigServers(CreateRequest requestFactory, Class<T> wantedReturnType) { Exception lastException = null; for (URI configServer : configServers) { final CloseableHttpResponse response; try { response = client.execute(requestFactory.createRequest(configServer)); } catch (Exception e) { if (e.getMessage().indexOf("(Connection refused)") > 0) { NODE_ADMIN_LOGGER.info("Connection refused to " + configServer + " (upgrading?), will try next"); } else { NODE_ADMIN_LOGGER.warning("Failed to communicate with " + configServer + ", will try next: " + e.getMessage()); } lastException = e; continue; } try { Optional<HttpException> retryableException = HttpException.handleStatusCode( response.getStatusLine().getStatusCode(), "Config server " + configServer); if (retryableException.isPresent()) { lastException = retryableException.get(); continue; } try { return mapper.readValue(response.getEntity().getContent(), wantedReturnType); } catch (IOException e) { throw new RuntimeException("Response didn't contain nodes element, failed parsing?", e); } } finally { try { response.close(); } catch (IOException e) { NODE_ADMIN_LOGGER.warning("Ignoring exception from closing response", e); } } } throw new RuntimeException("All requests against the config servers (" + configServers + ") failed, last as follows:", lastException); } @Override public <T> T put(String path, Optional<Object> bodyJsonPojo, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> { HttpPut put = new 
HttpPut(configServer.resolve(path)); setContentTypeToApplicationJson(put); if (bodyJsonPojo.isPresent()) { put.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo.get()))); } return put; }, wantedReturnType); } @Override public <T> T patch(String path, Object bodyJsonPojo, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> { HttpPatch patch = new HttpPatch(configServer.resolve(path)); setContentTypeToApplicationJson(patch); patch.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo))); return patch; }, wantedReturnType); } @Override public <T> T delete(String path, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> new HttpDelete(configServer.resolve(path)), wantedReturnType); } @Override public <T> T get(String path, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> new HttpGet(configServer.resolve(path)), wantedReturnType); } @Override public <T> T post(String path, Object bodyJsonPojo, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> { HttpPost post = new HttpPost(configServer.resolve(path)); setContentTypeToApplicationJson(post); post.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo))); return post; }, wantedReturnType); } @Override public void close() { runOnClose.run(); client.close(); } private void setContentTypeToApplicationJson(HttpRequestBase request) { request.setHeader(HttpHeaders.CONTENT_TYPE, "application/json"); } private static SelfCloseableHttpClient createClient(SSLContext sslContext, AthenzService identity) { AthenzIdentityVerifier identityVerifier = new AthenzIdentityVerifier(singleton(identity)); SSLConnectionSocketFactory socketFactory = new SSLConnectionSocketFactory(sslContext, identityVerifier); return new SelfCloseableHttpClient(socketFactory); } private static SelfCloseableHttpClient createClient(SiaIdentityProvider identityProvider) { return createClient(identityProvider.getIdentitySslContext(), 
identityProvider.identity()); } private static List<URI> randomizeConfigServerUris(Collection<URI> configServerUris) { List<URI> shuffledConfigServerHosts = new ArrayList<>(configServerUris); Collections.shuffle(shuffledConfigServerHosts); return shuffledConfigServerHosts; } }
It cannot be closed as there may be asynchronous (or future) executions on the client's execute(). The pool is closed in finalize().
private void setClient(SSLContext sslContext, AthenzService identity) { this.client = createClient(sslContext, identity); }
this.client = createClient(sslContext, identity);
private void setClient(SSLContext sslContext, AthenzService identity) { this.client = createClient(sslContext, identity); }
class ConfigServerApiImpl implements ConfigServerApi { private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(ConfigServerApiImpl.class); private final ObjectMapper mapper = new ObjectMapper(); private final List<URI> configServers; private Runnable runOnClose = () -> {}; /** * The 'client' may be periodically re-created through calls to setSSLConnectionSocketFactory. * * The 'client' reference must be volatile because it is set and read in different threads, and visibility * of changes is only guaranteed for volatile variables. */ private volatile SelfCloseableHttpClient client; /** * Creates an api for talking to the config servers with a fixed socket factory. * * <p>This may be used to avoid requiring background certificate signing requests (CSR) * against the config server when client validation is enabled in the config server. */ public static ConfigServerApiImpl createWithSocketFactory( List<URI> configServerHosts, SSLConnectionSocketFactory socketFactory) { return new ConfigServerApiImpl(configServerHosts, new SelfCloseableHttpClient(socketFactory)); } public static ConfigServerApiImpl create(ConfigServerInfo configServerInfo, SiaIdentityProvider identityProvider) { return new ConfigServerApiImpl(configServerInfo.getConfigServerUris(), identityProvider); } public static ConfigServerApiImpl createFor(ConfigServerInfo configServerInfo, SiaIdentityProvider identityProvider, HostName configServer) { URI uri = configServerInfo.getConfigServerUri(configServer.value()); return new ConfigServerApiImpl(Collections.singletonList(uri), identityProvider); } static ConfigServerApiImpl createForTestingWithClient(List<URI> configServerHosts, SelfCloseableHttpClient client) { return new ConfigServerApiImpl(configServerHosts, client); } private ConfigServerApiImpl(Collection<URI> configServers, SiaIdentityProvider identityProvider) { this(configServers, createClient(identityProvider)); ServiceIdentityProvider.Listener listener = 
this::setClient; identityProvider.addIdentityListener(listener); this.runOnClose = () -> identityProvider.removeIdentityListener(listener); } private ConfigServerApiImpl(Collection<URI> configServers, SelfCloseableHttpClient client) { this.configServers = randomizeConfigServerUris(configServers); this.client = client; } interface CreateRequest { HttpUriRequest createRequest(URI configServerUri) throws JsonProcessingException, UnsupportedEncodingException; } private <T> T tryAllConfigServers(CreateRequest requestFactory, Class<T> wantedReturnType) { Exception lastException = null; for (URI configServer : configServers) { final CloseableHttpResponse response; try { response = client.execute(requestFactory.createRequest(configServer)); } catch (Exception e) { if (e.getMessage().indexOf("(Connection refused)") > 0) { NODE_ADMIN_LOGGER.info("Connection refused to " + configServer + " (upgrading?), will try next"); } else { NODE_ADMIN_LOGGER.warning("Failed to communicate with " + configServer + ", will try next: " + e.getMessage()); } lastException = e; continue; } try { Optional<HttpException> retryableException = HttpException.handleStatusCode( response.getStatusLine().getStatusCode(), "Config server " + configServer); if (retryableException.isPresent()) { lastException = retryableException.get(); continue; } try { return mapper.readValue(response.getEntity().getContent(), wantedReturnType); } catch (IOException e) { throw new RuntimeException("Response didn't contain nodes element, failed parsing?", e); } } finally { try { response.close(); } catch (IOException e) { NODE_ADMIN_LOGGER.warning("Ignoring exception from closing response", e); } } } throw new RuntimeException("All requests against the config servers (" + configServers + ") failed, last as follows:", lastException); } @Override public <T> T put(String path, Optional<Object> bodyJsonPojo, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> { HttpPut put = new 
HttpPut(configServer.resolve(path)); setContentTypeToApplicationJson(put); if (bodyJsonPojo.isPresent()) { put.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo.get()))); } return put; }, wantedReturnType); } @Override public <T> T patch(String path, Object bodyJsonPojo, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> { HttpPatch patch = new HttpPatch(configServer.resolve(path)); setContentTypeToApplicationJson(patch); patch.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo))); return patch; }, wantedReturnType); } @Override public <T> T delete(String path, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> new HttpDelete(configServer.resolve(path)), wantedReturnType); } @Override public <T> T get(String path, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> new HttpGet(configServer.resolve(path)), wantedReturnType); } @Override public <T> T post(String path, Object bodyJsonPojo, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> { HttpPost post = new HttpPost(configServer.resolve(path)); setContentTypeToApplicationJson(post); post.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo))); return post; }, wantedReturnType); } @Override public void close() { runOnClose.run(); client.close(); } private void setContentTypeToApplicationJson(HttpRequestBase request) { request.setHeader(HttpHeaders.CONTENT_TYPE, "application/json"); } private static SelfCloseableHttpClient createClient(SSLContext sslContext, AthenzService identity) { AthenzIdentityVerifier identityVerifier = new AthenzIdentityVerifier(singleton(identity)); SSLConnectionSocketFactory socketFactory = new SSLConnectionSocketFactory(sslContext, identityVerifier); return new SelfCloseableHttpClient(socketFactory); } private static SelfCloseableHttpClient createClient(SiaIdentityProvider identityProvider) { return createClient(identityProvider.getIdentitySslContext(), 
identityProvider.identity()); } private static List<URI> randomizeConfigServerUris(Collection<URI> configServerUris) { List<URI> shuffledConfigServerHosts = new ArrayList<>(configServerUris); Collections.shuffle(shuffledConfigServerHosts); return shuffledConfigServerHosts; } }
class ConfigServerApiImpl implements ConfigServerApi { private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(ConfigServerApiImpl.class); private final ObjectMapper mapper = new ObjectMapper(); private final List<URI> configServers; private Runnable runOnClose = () -> {}; /** * The 'client' may be periodically re-created through calls to setSSLConnectionSocketFactory. * * The 'client' reference must be volatile because it is set and read in different threads, and visibility * of changes is only guaranteed for volatile variables. */ private volatile SelfCloseableHttpClient client; /** * Creates an api for talking to the config servers with a fixed socket factory. * * <p>This may be used to avoid requiring background certificate signing requests (CSR) * against the config server when client validation is enabled in the config server. */ public static ConfigServerApiImpl createWithSocketFactory( List<URI> configServerHosts, SSLConnectionSocketFactory socketFactory) { return new ConfigServerApiImpl(configServerHosts, new SelfCloseableHttpClient(socketFactory)); } public static ConfigServerApiImpl create(ConfigServerInfo configServerInfo, SiaIdentityProvider identityProvider) { return new ConfigServerApiImpl(configServerInfo.getConfigServerUris(), identityProvider); } public static ConfigServerApiImpl createFor(ConfigServerInfo configServerInfo, SiaIdentityProvider identityProvider, HostName configServer) { URI uri = configServerInfo.getConfigServerUri(configServer.value()); return new ConfigServerApiImpl(Collections.singletonList(uri), identityProvider); } static ConfigServerApiImpl createForTestingWithClient(List<URI> configServerHosts, SelfCloseableHttpClient client) { return new ConfigServerApiImpl(configServerHosts, client); } private ConfigServerApiImpl(Collection<URI> configServers, SiaIdentityProvider identityProvider) { this(configServers, createClient(identityProvider)); ServiceIdentityProvider.Listener listener = 
this::setClient; identityProvider.addIdentityListener(listener); this.runOnClose = () -> identityProvider.removeIdentityListener(listener); } private ConfigServerApiImpl(Collection<URI> configServers, SelfCloseableHttpClient client) { this.configServers = randomizeConfigServerUris(configServers); this.client = client; } interface CreateRequest { HttpUriRequest createRequest(URI configServerUri) throws JsonProcessingException, UnsupportedEncodingException; } private <T> T tryAllConfigServers(CreateRequest requestFactory, Class<T> wantedReturnType) { Exception lastException = null; for (URI configServer : configServers) { final CloseableHttpResponse response; try { response = client.execute(requestFactory.createRequest(configServer)); } catch (Exception e) { if (e.getMessage().indexOf("(Connection refused)") > 0) { NODE_ADMIN_LOGGER.info("Connection refused to " + configServer + " (upgrading?), will try next"); } else { NODE_ADMIN_LOGGER.warning("Failed to communicate with " + configServer + ", will try next: " + e.getMessage()); } lastException = e; continue; } try { Optional<HttpException> retryableException = HttpException.handleStatusCode( response.getStatusLine().getStatusCode(), "Config server " + configServer); if (retryableException.isPresent()) { lastException = retryableException.get(); continue; } try { return mapper.readValue(response.getEntity().getContent(), wantedReturnType); } catch (IOException e) { throw new RuntimeException("Response didn't contain nodes element, failed parsing?", e); } } finally { try { response.close(); } catch (IOException e) { NODE_ADMIN_LOGGER.warning("Ignoring exception from closing response", e); } } } throw new RuntimeException("All requests against the config servers (" + configServers + ") failed, last as follows:", lastException); } @Override public <T> T put(String path, Optional<Object> bodyJsonPojo, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> { HttpPut put = new 
HttpPut(configServer.resolve(path)); setContentTypeToApplicationJson(put); if (bodyJsonPojo.isPresent()) { put.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo.get()))); } return put; }, wantedReturnType); } @Override public <T> T patch(String path, Object bodyJsonPojo, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> { HttpPatch patch = new HttpPatch(configServer.resolve(path)); setContentTypeToApplicationJson(patch); patch.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo))); return patch; }, wantedReturnType); } @Override public <T> T delete(String path, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> new HttpDelete(configServer.resolve(path)), wantedReturnType); } @Override public <T> T get(String path, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> new HttpGet(configServer.resolve(path)), wantedReturnType); } @Override public <T> T post(String path, Object bodyJsonPojo, Class<T> wantedReturnType) { return tryAllConfigServers(configServer -> { HttpPost post = new HttpPost(configServer.resolve(path)); setContentTypeToApplicationJson(post); post.setEntity(new StringEntity(mapper.writeValueAsString(bodyJsonPojo))); return post; }, wantedReturnType); } @Override public void close() { runOnClose.run(); client.close(); } private void setContentTypeToApplicationJson(HttpRequestBase request) { request.setHeader(HttpHeaders.CONTENT_TYPE, "application/json"); } private static SelfCloseableHttpClient createClient(SSLContext sslContext, AthenzService identity) { AthenzIdentityVerifier identityVerifier = new AthenzIdentityVerifier(singleton(identity)); SSLConnectionSocketFactory socketFactory = new SSLConnectionSocketFactory(sslContext, identityVerifier); return new SelfCloseableHttpClient(socketFactory); } private static SelfCloseableHttpClient createClient(SiaIdentityProvider identityProvider) { return createClient(identityProvider.getIdentitySslContext(), 
identityProvider.identity()); } private static List<URI> randomizeConfigServerUris(Collection<URI> configServerUris) { List<URI> shuffledConfigServerHosts = new ArrayList<>(configServerUris); Collections.shuffle(shuffledConfigServerHosts); return shuffledConfigServerHosts; } }
For future reference: `UncheckedIOException` is better suited here.
public SchemaValidators(Version vespaVersion, DeployLogger logger) { this.deployLogger = logger; File schemaDir = null; try { schemaDir = saveSchemasFromJar(new File(SchemaValidators.schemaDirBase), vespaVersion); servicesXmlValidator = createValidator(schemaDir, servicesXmlSchemaName); hostsXmlValidator = createValidator(schemaDir, hostsXmlSchemaName); deploymentXmlValidator = createValidator(schemaDir, deploymentXmlSchemaName); validationOverridesXmlValidator = createValidator(schemaDir, validationOverridesXmlSchemaName); containerIncludeXmlValidator = createValidator(schemaDir, containerIncludeXmlSchemaName); routingStandaloneXmlValidator = createValidator(schemaDir, routingStandaloneXmlSchemaName); } catch (IOException ioe) { throw new RuntimeException(ioe); } catch (Exception e) { throw e; } finally { if (schemaDir != null) IOUtils.recursiveDeleteDir(schemaDir); } }
throw new RuntimeException(ioe);
public SchemaValidators(Version vespaVersion, DeployLogger logger) { this.deployLogger = logger; File schemaDir = null; try { schemaDir = saveSchemasFromJar(new File(SchemaValidators.schemaDirBase), vespaVersion); servicesXmlValidator = createValidator(schemaDir, servicesXmlSchemaName); hostsXmlValidator = createValidator(schemaDir, hostsXmlSchemaName); deploymentXmlValidator = createValidator(schemaDir, deploymentXmlSchemaName); validationOverridesXmlValidator = createValidator(schemaDir, validationOverridesXmlSchemaName); containerIncludeXmlValidator = createValidator(schemaDir, containerIncludeXmlSchemaName); routingStandaloneXmlValidator = createValidator(schemaDir, routingStandaloneXmlSchemaName); } catch (IOException ioe) { throw new RuntimeException(ioe); } catch (Exception e) { throw e; } finally { if (schemaDir != null) IOUtils.recursiveDeleteDir(schemaDir); } }
class SchemaValidators { private static final String schemaDirBase = System.getProperty("java.io.tmpdir", File.separator + "tmp" + File.separator + "vespa"); private static final Logger log = Logger.getLogger(SchemaValidators.class.getName()); private static final String servicesXmlSchemaName = "services.rnc"; private static final String hostsXmlSchemaName = "hosts.rnc"; private static final String deploymentXmlSchemaName = "deployment.rnc"; private static final String validationOverridesXmlSchemaName = "validation-overrides.rnc"; private static final String containerIncludeXmlSchemaName = "container-include.rnc"; private static final String routingStandaloneXmlSchemaName = "routing-standalone.rnc"; private final DeployLogger deployLogger; private final SchemaValidator servicesXmlValidator; private final SchemaValidator hostsXmlValidator; private final SchemaValidator deploymentXmlValidator; private final SchemaValidator validationOverridesXmlValidator; private final SchemaValidator containerIncludeXmlValidator; private final SchemaValidator routingStandaloneXmlValidator; /** * Initializes the validator by using the given file as schema file * * @param vespaVersion the version of Vespa we should validate against */ /** * Initializes the validator by using the given file as schema file * * @param vespaVersion the version of Vespa we should validate against */ public SchemaValidators(Version vespaVersion) { this(vespaVersion, new BaseDeployLogger()); } public SchemaValidator servicesXmlValidator() { return servicesXmlValidator; } public SchemaValidator hostsXmlValidator() { return hostsXmlValidator; } public SchemaValidator deploymentXmlValidator() { return deploymentXmlValidator; } SchemaValidator validationOverridesXmlValidator() { return validationOverridesXmlValidator; } SchemaValidator containerIncludeXmlValidator() { return containerIncludeXmlValidator; } public SchemaValidator routingStandaloneXmlValidator() { return routingStandaloneXmlValidator; } /** * Look 
for the schema files that should be in vespa-model.jar and saves them on temp dir. * * @return the directory the schema files are stored in * @throws IOException if it is not possible to read schema files */ File saveSchemasFromJar(File tmpBase, Version vespaVersion) throws IOException { final Class<? extends SchemaValidators> schemaValidatorClass = this.getClass(); final ClassLoader classLoader = schemaValidatorClass.getClassLoader(); Enumeration<URL> uris = classLoader.getResources("schema"); if (uris == null) return null; File tmpDir = java.nio.file.Files.createTempDirectory(tmpBase.toPath(), "vespa").toFile(); log.log(LogLevel.DEBUG, "Will save all XML schemas to " + tmpDir); while (uris.hasMoreElements()) { URL u = uris.nextElement(); log.log(LogLevel.DEBUG, "uri for resource 'schema'=" + u.toString()); if ("jar".equals(u.getProtocol())) { JarURLConnection jarConnection = (JarURLConnection) u.openConnection(); JarFile jarFile = jarConnection.getJarFile(); for (Enumeration<JarEntry> entries = jarFile.entries(); entries.hasMoreElements(); ) { JarEntry je = entries.nextElement(); if (je.getName().startsWith("schema/") && je.getName().endsWith(".rnc")) { writeContentsToFile(tmpDir, je.getName(), jarFile.getInputStream(je)); } } jarFile.close(); } else if ("bundle".equals(u.getProtocol())) { Bundle bundle = FrameworkUtil.getBundle(schemaValidatorClass); log.log(LogLevel.DEBUG, classLoader.toString()); log.log(LogLevel.DEBUG, "bundle=" + bundle); if (bundle == null) { File schemaPath; if (vespaVersion.getMajor() == 5) { schemaPath = new File(getDefaults().underVespaHome("share/vespa/schema/version/5.x/schema/")); } else { schemaPath = new File(getDefaults().underVespaHome("share/vespa/schema/")); } log.log(LogLevel.DEBUG, "Using schemas found in " + schemaPath); copySchemas(schemaPath, tmpDir); } else { log.log(LogLevel.DEBUG, String.format("Saving schemas for model bundle %s:%s", bundle.getSymbolicName(), bundle .getVersion())); for (Enumeration<URL> entries = 
bundle.findEntries("schema", "*.rnc", true); entries.hasMoreElements(); ) { URL url = entries.nextElement(); writeContentsToFile(tmpDir, url.getFile(), url.openStream()); } } } else if ("file".equals(u.getProtocol())) { File schemaPath = new File(u.getPath()); copySchemas(schemaPath, tmpDir); } } return tmpDir; } private static void copySchemas(File from, File to) throws IOException { if (! from.exists()) throw new IOException("Could not find schema source directory '" + from + "'"); if (! from.isDirectory()) throw new IOException("Schema source '" + from + "' is not a directory"); File sourceFile = new File(from, servicesXmlSchemaName); if (! sourceFile.exists()) throw new IOException("Schema source file '" + sourceFile + "' not found"); IOUtils.copyDirectoryInto(from, to); } private static void writeContentsToFile(File outDir, String outFile, InputStream inputStream) throws IOException { String contents = IOUtils.readAll(new InputStreamReader(inputStream)); File out = new File(outDir, outFile); IOUtils.writeFile(out, contents, false); } private SchemaValidator createValidator(File schemaDir, String schemaFile) { try { File file = new File(schemaDir + File.separator + "schema" + File.separator + schemaFile); return new SchemaValidator(file, deployLogger); } catch (SAXException e) { throw new RuntimeException("Invalid schema '" + schemaFile + "'", e); } catch (IOException e) { throw new RuntimeException("IO error reading schema '" + schemaFile + "'", e); } } }
class SchemaValidators { private static final String schemaDirBase = System.getProperty("java.io.tmpdir", File.separator + "tmp" + File.separator + "vespa"); private static final Logger log = Logger.getLogger(SchemaValidators.class.getName()); private static final String servicesXmlSchemaName = "services.rnc"; private static final String hostsXmlSchemaName = "hosts.rnc"; private static final String deploymentXmlSchemaName = "deployment.rnc"; private static final String validationOverridesXmlSchemaName = "validation-overrides.rnc"; private static final String containerIncludeXmlSchemaName = "container-include.rnc"; private static final String routingStandaloneXmlSchemaName = "routing-standalone.rnc"; private final DeployLogger deployLogger; private final SchemaValidator servicesXmlValidator; private final SchemaValidator hostsXmlValidator; private final SchemaValidator deploymentXmlValidator; private final SchemaValidator validationOverridesXmlValidator; private final SchemaValidator containerIncludeXmlValidator; private final SchemaValidator routingStandaloneXmlValidator; /** * Initializes the validator by using the given file as schema file * * @param vespaVersion the version of Vespa we should validate against */ /** * Initializes the validator by using the given file as schema file * * @param vespaVersion the version of Vespa we should validate against */ public SchemaValidators(Version vespaVersion) { this(vespaVersion, new BaseDeployLogger()); } public SchemaValidator servicesXmlValidator() { return servicesXmlValidator; } public SchemaValidator hostsXmlValidator() { return hostsXmlValidator; } public SchemaValidator deploymentXmlValidator() { return deploymentXmlValidator; } SchemaValidator validationOverridesXmlValidator() { return validationOverridesXmlValidator; } SchemaValidator containerIncludeXmlValidator() { return containerIncludeXmlValidator; } public SchemaValidator routingStandaloneXmlValidator() { return routingStandaloneXmlValidator; } /** * Look 
for the schema files that should be in vespa-model.jar and saves them on temp dir. * * @return the directory the schema files are stored in * @throws IOException if it is not possible to read schema files */ File saveSchemasFromJar(File tmpBase, Version vespaVersion) throws IOException { final Class<? extends SchemaValidators> schemaValidatorClass = this.getClass(); final ClassLoader classLoader = schemaValidatorClass.getClassLoader(); Enumeration<URL> uris = classLoader.getResources("schema"); if (uris == null) return null; File tmpDir = java.nio.file.Files.createTempDirectory(tmpBase.toPath(), "vespa").toFile(); log.log(LogLevel.DEBUG, "Will save all XML schemas to " + tmpDir); while (uris.hasMoreElements()) { URL u = uris.nextElement(); log.log(LogLevel.DEBUG, "uri for resource 'schema'=" + u.toString()); if ("jar".equals(u.getProtocol())) { JarURLConnection jarConnection = (JarURLConnection) u.openConnection(); JarFile jarFile = jarConnection.getJarFile(); for (Enumeration<JarEntry> entries = jarFile.entries(); entries.hasMoreElements(); ) { JarEntry je = entries.nextElement(); if (je.getName().startsWith("schema/") && je.getName().endsWith(".rnc")) { writeContentsToFile(tmpDir, je.getName(), jarFile.getInputStream(je)); } } jarFile.close(); } else if ("bundle".equals(u.getProtocol())) { Bundle bundle = FrameworkUtil.getBundle(schemaValidatorClass); log.log(LogLevel.DEBUG, classLoader.toString()); log.log(LogLevel.DEBUG, "bundle=" + bundle); if (bundle == null) { File schemaPath; if (vespaVersion.getMajor() == 5) { schemaPath = new File(getDefaults().underVespaHome("share/vespa/schema/version/5.x/schema/")); } else { schemaPath = new File(getDefaults().underVespaHome("share/vespa/schema/")); } log.log(LogLevel.DEBUG, "Using schemas found in " + schemaPath); copySchemas(schemaPath, tmpDir); } else { log.log(LogLevel.DEBUG, String.format("Saving schemas for model bundle %s:%s", bundle.getSymbolicName(), bundle .getVersion())); for (Enumeration<URL> entries = 
bundle.findEntries("schema", "*.rnc", true); entries.hasMoreElements(); ) { URL url = entries.nextElement(); writeContentsToFile(tmpDir, url.getFile(), url.openStream()); } } } else if ("file".equals(u.getProtocol())) { File schemaPath = new File(u.getPath()); copySchemas(schemaPath, tmpDir); } } return tmpDir; } private static void copySchemas(File from, File to) throws IOException { if (! from.exists()) throw new IOException("Could not find schema source directory '" + from + "'"); if (! from.isDirectory()) throw new IOException("Schema source '" + from + "' is not a directory"); File sourceFile = new File(from, servicesXmlSchemaName); if (! sourceFile.exists()) throw new IOException("Schema source file '" + sourceFile + "' not found"); IOUtils.copyDirectoryInto(from, to); } private static void writeContentsToFile(File outDir, String outFile, InputStream inputStream) throws IOException { String contents = IOUtils.readAll(new InputStreamReader(inputStream)); File out = new File(outDir, outFile); IOUtils.writeFile(out, contents, false); } private SchemaValidator createValidator(File schemaDir, String schemaFile) { try { File file = new File(schemaDir + File.separator + "schema" + File.separator + schemaFile); return new SchemaValidator(file, deployLogger); } catch (SAXException e) { throw new RuntimeException("Invalid schema '" + schemaFile + "'", e); } catch (IOException e) { throw new RuntimeException("IO error reading schema '" + schemaFile + "'", e); } } }
do you really need both NonNull and NonEmpty?
protected void adding(Item item) { Validator.ensureInstanceOf("Child item", item, TermItem.class); TermItem asTerm = (TermItem) item; Validator.ensureNotNull("Struct fieldname", asTerm.getIndexName()); Validator.ensureNotNull("Query term", asTerm.getIndexedString()); Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName()); Validator.ensureNonEmpty("Query term", asTerm.getIndexedString()); }
Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName());
protected void adding(Item item) { Validator.ensureInstanceOf("Child item", item, TermItem.class); TermItem asTerm = (TermItem) item; Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName()); Validator.ensureNonEmpty("Query term", asTerm.getIndexedString()); }
class SameElementItem extends CompositeIndexedItem { public SameElementItem(String commonPath) { setIndexName(commonPath); } @Override public String getIndexedString() { StringBuilder buf = new StringBuilder(); for (Iterator<Item> i = getItemIterator(); i.hasNext();) { IndexedItem indexedItem = (IndexedItem) i.next(); buf.append(indexedItem.getIndexedString()); if (i.hasNext()) { buf.append(' '); } } return buf.toString(); } @Override public int getNumWords() { return getItemCount(); } @Override @Override public ItemType getItemType() { return ItemType.SAME_ELEMENT; } @Override public String getName() { return getItemType().toString(); } }
class SameElementItem extends CompositeIndexedItem { public SameElementItem(String commonPath) { setIndexName(commonPath); } @Override public String getIndexedString() { StringBuilder buf = new StringBuilder(); for (Iterator<Item> i = getItemIterator(); i.hasNext();) { IndexedItem indexedItem = (IndexedItem) i.next(); buf.append(indexedItem.getIndexedString()); if (i.hasNext()) { buf.append(' '); } } return buf.toString(); } protected void appendHeadingString(StringBuilder buffer) { } protected void appendBodyString(StringBuilder buffer) { appendIndexString(buffer); buffer.append('{'); for (Iterator<Item> i = getItemIterator(); i.hasNext();) { TermItem term = (TermItem) i.next(); buffer.append(term.getIndexName()).append(':').append(term.getIndexedString()); if (i.hasNext()) { buffer.append(' '); } } buffer.append('}'); } @Override public int getNumWords() { return getItemCount(); } @Override @Override public ItemType getItemType() { return ItemType.SAME_ELEMENT; } @Override public String getName() { return getItemType().toString(); } }
consider letting toString also mention the struct name
public void testAddItem() { SameElementItem s = new SameElementItem("structa"); s.addItem(new WordItem("b", "f1")); s.addItem(new WordItem("c", "f2")); s.addItem(new WordItem("d", "f3")); assertEquals("SAME_ELEMENT f1:b f2:c f3:d", s.toString()); }
assertEquals("SAME_ELEMENT f1:b f2:c f3:d", s.toString());
public void testAddItem() { SameElementItem s = new SameElementItem("structa"); s.addItem(new WordItem("b", "f1")); s.addItem(new WordItem("c", "f2")); s.addItem(new WordItem("d", "f3")); assertEquals("structa:{f1:b f2:c f3:d}", s.toString()); }
class SameElementItemTestCase { @Test @Test(expected = IllegalArgumentException.class) public void requireAllChildrenHaveStructMemberNameSet() { SameElementItem s = new SameElementItem("structa"); s.addItem(new WordItem("b", "f1")); s.addItem(new WordItem("c")); } @Test(expected = IllegalArgumentException.class) public void requireAllChildrenHaveNonEmptyTerm() { SameElementItem s = new SameElementItem("structa"); s.addItem(new WordItem("", "f2")); } @Test(expected = IllegalArgumentException.class) public void requireAllChildrenAreTermItems() { SameElementItem s = new SameElementItem("structa"); s.addItem(new AndItem()); } }
class SameElementItemTestCase { @Test @Test(expected = IllegalArgumentException.class) public void requireAllChildrenHaveStructMemberNameSet() { SameElementItem s = new SameElementItem("structa"); s.addItem(new WordItem("b", "f1")); s.addItem(new WordItem("c")); } @Test(expected = IllegalArgumentException.class) public void requireAllChildrenHaveNonEmptyTerm() { SameElementItem s = new SameElementItem("structa"); s.addItem(new WordItem("", "f2")); } @Test(expected = IllegalArgumentException.class) public void requireAllChildrenAreTermItems() { SameElementItem s = new SameElementItem("structa"); s.addItem(new AndItem()); } }
misplaced end brace
public String getIndexedString() { StringBuilder buf = new StringBuilder(); for (Iterator<Item> i = getItemIterator(); i.hasNext();) { IndexedItem indexedItem = (IndexedItem) i.next(); buf.append(indexedItem.getIndexedString()); if (i.hasNext()) { buf.append(' '); } } return buf.toString(); }
return buf.toString(); }
public String getIndexedString() { StringBuilder buf = new StringBuilder(); for (Iterator<Item> i = getItemIterator(); i.hasNext();) { IndexedItem indexedItem = (IndexedItem) i.next(); buf.append(indexedItem.getIndexedString()); if (i.hasNext()) { buf.append(' '); } } return buf.toString(); }
class SameElementItem extends CompositeIndexedItem { public SameElementItem(String commonPath) { setIndexName(commonPath); } @Override @Override public int getNumWords() { return getItemCount(); } @Override protected void adding(Item item) { Validator.ensureInstanceOf("Child item", item, TermItem.class); TermItem asTerm = (TermItem) item; Validator.ensureNotNull("Struct fieldname", asTerm.getIndexName()); Validator.ensureNotNull("Query term", asTerm.getIndexedString()); Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName()); Validator.ensureNonEmpty("Query term", asTerm.getIndexedString()); } @Override public ItemType getItemType() { return ItemType.SAME_ELEMENT; } @Override public String getName() { return getItemType().toString(); } }
class SameElementItem extends CompositeIndexedItem { public SameElementItem(String commonPath) { setIndexName(commonPath); } @Override protected void appendHeadingString(StringBuilder buffer) { } protected void appendBodyString(StringBuilder buffer) { appendIndexString(buffer); buffer.append('{'); for (Iterator<Item> i = getItemIterator(); i.hasNext();) { TermItem term = (TermItem) i.next(); buffer.append(term.getIndexName()).append(':').append(term.getIndexedString()); if (i.hasNext()) { buffer.append(' '); } } buffer.append('}'); } @Override public int getNumWords() { return getItemCount(); } @Override protected void adding(Item item) { Validator.ensureInstanceOf("Child item", item, TermItem.class); TermItem asTerm = (TermItem) item; Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName()); Validator.ensureNonEmpty("Query term", asTerm.getIndexedString()); } @Override public ItemType getItemType() { return ItemType.SAME_ELEMENT; } @Override public String getName() { return getItemType().toString(); } }
You are right. index is guaranteed non-null in TermItem. There also seem to be protection null terms in the various Items.
protected void adding(Item item) { Validator.ensureInstanceOf("Child item", item, TermItem.class); TermItem asTerm = (TermItem) item; Validator.ensureNotNull("Struct fieldname", asTerm.getIndexName()); Validator.ensureNotNull("Query term", asTerm.getIndexedString()); Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName()); Validator.ensureNonEmpty("Query term", asTerm.getIndexedString()); }
Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName());
protected void adding(Item item) { Validator.ensureInstanceOf("Child item", item, TermItem.class); TermItem asTerm = (TermItem) item; Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName()); Validator.ensureNonEmpty("Query term", asTerm.getIndexedString()); }
class SameElementItem extends CompositeIndexedItem { public SameElementItem(String commonPath) { setIndexName(commonPath); } @Override public String getIndexedString() { StringBuilder buf = new StringBuilder(); for (Iterator<Item> i = getItemIterator(); i.hasNext();) { IndexedItem indexedItem = (IndexedItem) i.next(); buf.append(indexedItem.getIndexedString()); if (i.hasNext()) { buf.append(' '); } } return buf.toString(); } @Override public int getNumWords() { return getItemCount(); } @Override @Override public ItemType getItemType() { return ItemType.SAME_ELEMENT; } @Override public String getName() { return getItemType().toString(); } }
class SameElementItem extends CompositeIndexedItem { public SameElementItem(String commonPath) { setIndexName(commonPath); } @Override public String getIndexedString() { StringBuilder buf = new StringBuilder(); for (Iterator<Item> i = getItemIterator(); i.hasNext();) { IndexedItem indexedItem = (IndexedItem) i.next(); buf.append(indexedItem.getIndexedString()); if (i.hasNext()) { buf.append(' '); } } return buf.toString(); } protected void appendHeadingString(StringBuilder buffer) { } protected void appendBodyString(StringBuilder buffer) { appendIndexString(buffer); buffer.append('{'); for (Iterator<Item> i = getItemIterator(); i.hasNext();) { TermItem term = (TermItem) i.next(); buffer.append(term.getIndexName()).append(':').append(term.getIndexedString()); if (i.hasNext()) { buffer.append(' '); } } buffer.append('}'); } @Override public int getNumWords() { return getItemCount(); } @Override @Override public ItemType getItemType() { return ItemType.SAME_ELEMENT; } @Override public String getName() { return getItemType().toString(); } }
Fixed
public String getIndexedString() { StringBuilder buf = new StringBuilder(); for (Iterator<Item> i = getItemIterator(); i.hasNext();) { IndexedItem indexedItem = (IndexedItem) i.next(); buf.append(indexedItem.getIndexedString()); if (i.hasNext()) { buf.append(' '); } } return buf.toString(); }
return buf.toString(); }
public String getIndexedString() { StringBuilder buf = new StringBuilder(); for (Iterator<Item> i = getItemIterator(); i.hasNext();) { IndexedItem indexedItem = (IndexedItem) i.next(); buf.append(indexedItem.getIndexedString()); if (i.hasNext()) { buf.append(' '); } } return buf.toString(); }
class SameElementItem extends CompositeIndexedItem { public SameElementItem(String commonPath) { setIndexName(commonPath); } @Override @Override public int getNumWords() { return getItemCount(); } @Override protected void adding(Item item) { Validator.ensureInstanceOf("Child item", item, TermItem.class); TermItem asTerm = (TermItem) item; Validator.ensureNotNull("Struct fieldname", asTerm.getIndexName()); Validator.ensureNotNull("Query term", asTerm.getIndexedString()); Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName()); Validator.ensureNonEmpty("Query term", asTerm.getIndexedString()); } @Override public ItemType getItemType() { return ItemType.SAME_ELEMENT; } @Override public String getName() { return getItemType().toString(); } }
class SameElementItem extends CompositeIndexedItem { public SameElementItem(String commonPath) { setIndexName(commonPath); } @Override protected void appendHeadingString(StringBuilder buffer) { } protected void appendBodyString(StringBuilder buffer) { appendIndexString(buffer); buffer.append('{'); for (Iterator<Item> i = getItemIterator(); i.hasNext();) { TermItem term = (TermItem) i.next(); buffer.append(term.getIndexName()).append(':').append(term.getIndexedString()); if (i.hasNext()) { buffer.append(' '); } } buffer.append('}'); } @Override public int getNumWords() { return getItemCount(); } @Override protected void adding(Item item) { Validator.ensureInstanceOf("Child item", item, TermItem.class); TermItem asTerm = (TermItem) item; Validator.ensureNonEmpty("Struct fieldname", asTerm.getIndexName()); Validator.ensureNonEmpty("Query term", asTerm.getIndexedString()); } @Override public ItemType getItemType() { return ItemType.SAME_ELEMENT; } @Override public String getName() { return getItemType().toString(); } }
Fixed.
public void testAddItem() { SameElementItem s = new SameElementItem("structa"); s.addItem(new WordItem("b", "f1")); s.addItem(new WordItem("c", "f2")); s.addItem(new WordItem("d", "f3")); assertEquals("SAME_ELEMENT f1:b f2:c f3:d", s.toString()); }
assertEquals("SAME_ELEMENT f1:b f2:c f3:d", s.toString());
public void testAddItem() { SameElementItem s = new SameElementItem("structa"); s.addItem(new WordItem("b", "f1")); s.addItem(new WordItem("c", "f2")); s.addItem(new WordItem("d", "f3")); assertEquals("structa:{f1:b f2:c f3:d}", s.toString()); }
class SameElementItemTestCase { @Test @Test(expected = IllegalArgumentException.class) public void requireAllChildrenHaveStructMemberNameSet() { SameElementItem s = new SameElementItem("structa"); s.addItem(new WordItem("b", "f1")); s.addItem(new WordItem("c")); } @Test(expected = IllegalArgumentException.class) public void requireAllChildrenHaveNonEmptyTerm() { SameElementItem s = new SameElementItem("structa"); s.addItem(new WordItem("", "f2")); } @Test(expected = IllegalArgumentException.class) public void requireAllChildrenAreTermItems() { SameElementItem s = new SameElementItem("structa"); s.addItem(new AndItem()); } }
class SameElementItemTestCase { @Test @Test(expected = IllegalArgumentException.class) public void requireAllChildrenHaveStructMemberNameSet() { SameElementItem s = new SameElementItem("structa"); s.addItem(new WordItem("b", "f1")); s.addItem(new WordItem("c")); } @Test(expected = IllegalArgumentException.class) public void requireAllChildrenHaveNonEmptyTerm() { SameElementItem s = new SameElementItem("structa"); s.addItem(new WordItem("", "f2")); } @Test(expected = IllegalArgumentException.class) public void requireAllChildrenAreTermItems() { SameElementItem s = new SameElementItem("structa"); s.addItem(new AndItem()); } }
Shouldn't this be in a `finally` block, in case a different exception is thrown? Same below.
private File decompressApplication(InputStream in, String contentType, File tempDir) { try (CompressedApplicationInputStream application = CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) { return decompressApplication(application, tempDir); } catch (IOException e) { cleanupTempDirectory(tempDir); throw new IllegalArgumentException("Unable to decompress data in body", e); } }
cleanupTempDirectory(tempDir);
private File decompressApplication(InputStream in, String contentType, File tempDir) { try (CompressedApplicationInputStream application = CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) { return decompressApplication(application, tempDir); } catch (IOException e) { throw new IllegalArgumentException("Unable to decompress data in body", e); } }
/**
 * The API for deploying, inspecting and removing applications on this config server.
 *
 * Fixed in this revision: the deploy/createSession overloads that create a temp
 * directory now delete it in a finally block, so the directory is not leaked when
 * decompression, prepare or activation throws (previously cleanup ran only on the
 * success path). Cleanup responsibility is therefore centralized in the callers,
 * and removed from the decompress helper's catch clause.
 */
class ApplicationRepository implements com.yahoo.config.provision.Deployer {

    private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName());

    private final TenantRepository tenantRepository;
    private final Optional<Provisioner> hostProvisioner;
    private final ApplicationConvergenceChecker convergeChecker;
    private final HttpProxy httpProxy;
    private final Clock clock;
    private final DeployLogger logger = new SilentDeployLogger();
    private final ConfigserverConfig configserverConfig;
    private final Environment environment;
    private final FileDistributionStatus fileDistributionStatus;

    @Inject
    public ApplicationRepository(TenantRepository tenantRepository,
                                 HostProvisionerProvider hostProvisionerProvider,
                                 ApplicationConvergenceChecker applicationConvergenceChecker,
                                 HttpProxy httpProxy,
                                 ConfigserverConfig configserverConfig) {
        this(tenantRepository, hostProvisionerProvider.getHostProvisioner(), applicationConvergenceChecker,
             httpProxy, configserverConfig, Clock.systemUTC(), new FileDistributionStatus());
    }

    public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner, Clock clock) {
        this(tenantRepository, Optional.of(hostProvisioner), new ApplicationConvergenceChecker(),
             new HttpProxy(new SimpleHttpFetcher()), new ConfigserverConfig(new ConfigserverConfig.Builder()),
             clock, new FileDistributionStatus());
    }

    private ApplicationRepository(TenantRepository tenantRepository,
                                  Optional<Provisioner> hostProvisioner,
                                  ApplicationConvergenceChecker applicationConvergenceChecker,
                                  HttpProxy httpProxy,
                                  ConfigserverConfig configserverConfig,
                                  Clock clock,
                                  FileDistributionStatus fileDistributionStatus) {
        this.tenantRepository = tenantRepository;
        this.hostProvisioner = hostProvisioner;
        this.convergeChecker = applicationConvergenceChecker;
        this.httpProxy = httpProxy;
        this.clock = clock;
        this.configserverConfig = configserverConfig;
        this.environment = Environment.from(configserverConfig.environment());
        this.fileDistributionStatus = fileDistributionStatus;
    }

    /** Prepares the session with the given id, returning the config change actions and a deploy log. */
    public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) {
        validateThatLocalSessionIsNotActive(tenant, sessionId);
        LocalSession session = getLocalSession(tenant, sessionId);
        ApplicationId applicationId = prepareParams.getApplicationId();
        Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId);
        Slime deployLog = createDeployLog();
        DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId);
        ConfigChangeActions actions = session.prepare(logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now);
        logConfigChangeActions(actions, logger);
        log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. ");
        return new PrepareResult(sessionId, actions, deployLog);
    }

    public PrepareResult prepareAndActivate(Tenant tenant, long sessionId, PrepareParams prepareParams,
                                            boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) {
        PrepareResult result = prepare(tenant, sessionId, prepareParams, now);
        activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreLockFailure, ignoreSessionStaleFailure);
        return result;
    }

    public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams) {
        return deploy(in, prepareParams, false, false, clock.instant());
    }

    public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams,
                                boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) {
        File tempDir = Files.createTempDir();
        try {
            return deploy(decompressApplication(in, tempDir), prepareParams, ignoreLockFailure, ignoreSessionStaleFailure, now);
        } finally {
            // Fixed: run cleanup in finally so the temp dir is not leaked when deploy throws
            cleanupTempDirectory(tempDir);
        }
    }

    public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) {
        return deploy(applicationPackage, prepareParams, false, false, Instant.now());
    }

    public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams,
                                boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) {
        ApplicationId applicationId = prepareParams.getApplicationId();
        long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage);
        Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
        return prepareAndActivate(tenant, sessionId, prepareParams, ignoreLockFailure, ignoreSessionStaleFailure, now);
    }

    /**
     * Creates a new deployment from the active application, if available.
     *
     * @param application the active application to be redeployed
     * @return a new deployment from the local active, or empty if a local active application
     *         was not present for this id (meaning it either is not active or active on another
     *         node in the config server cluster)
     */
    public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) {
        return deployFromLocalActive(application,
                                     Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5)));
    }

    /**
     * Creates a new deployment from the active application, if available.
     *
     * @param application the active application to be redeployed
     * @param timeout the timeout to use for each individual deployment operation
     * @return a new deployment from the local active, or empty if a local active application
     *         was not present for this id (meaning it either is not active or active on another
     *         node in the config server cluster)
     */
    @Override
    public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, Duration timeout) {
        Tenant tenant = tenantRepository.getTenant(application.tenant());
        if (tenant == null) return Optional.empty();
        LocalSession activeSession = getActiveSession(tenant, application);
        if (activeSession == null) return Optional.empty();
        TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout);
        LocalSession newSession = tenant.getSessionFactory().createSessionFromExisting(activeSession, logger, timeoutBudget);
        tenant.getLocalSessionRepo().addSession(newSession);
        Version version = decideVersion(application, environment, newSession.getVespaVersion());
        return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock,
                                                 false /* don't validate as this is already deployed */, version));
    }

    public ApplicationId activate(Tenant tenant, long sessionId, TimeoutBudget timeoutBudget,
                                  boolean ignoreLockFailure, boolean ignoreSessionStaleFailure) {
        LocalSession localSession = getLocalSession(tenant, sessionId);
        Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft());
        deployment.setIgnoreLockFailure(ignoreLockFailure);
        deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure);
        deployment.activate();
        return localSession.getApplicationId();
    }

    private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) {
        return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock);
    }

    /**
     * Removes a previously deployed application
     *
     * @return true if the application was found and removed, false if it was not present
     * @throws RuntimeException if the remove transaction fails. This method is exception safe.
     */
    public boolean remove(ApplicationId applicationId) {
        Optional<Tenant> owner = Optional.ofNullable(tenantRepository.getTenant(applicationId.tenant()));
        if ( ! owner.isPresent()) return false;

        TenantApplications tenantApplications = owner.get().getApplicationRepo();
        if ( ! tenantApplications.listApplications().contains(applicationId)) return false;

        long sessionId = tenantApplications.getSessionIdForApplication(applicationId);
        LocalSessionRepo localSessionRepo = owner.get().getLocalSessionRepo();
        LocalSession session = localSessionRepo.getSession(sessionId);
        if (session == null) return false;

        // All removal steps are collected in one transaction so removal is all-or-nothing
        NestedTransaction transaction = new NestedTransaction();
        localSessionRepo.removeSession(session.getSessionId(), transaction);
        session.delete(transaction);
        transaction.add(new Rotations(owner.get().getCurator(), owner.get().getPath()).delete(applicationId));
        transaction.add(tenantApplications.deleteApplication(applicationId));
        hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId));
        transaction.onCommitted(() -> log.log(LogLevel.INFO, "Deleted " + applicationId));
        transaction.commit();
        return true;
    }

    public HttpResponse clusterControllerStatusPage(Tenant tenant, ApplicationId applicationId, String hostName, String pathSuffix) {
        Application application = getApplication(tenant, applicationId);
        String relativePath = "clustercontroller-status/" + pathSuffix;
        return httpProxy.get(application, hostName, "container-clustercontroller", relativePath);
    }

    public Long getApplicationGeneration(Tenant tenant, ApplicationId applicationId) {
        return getApplication(tenant, applicationId).getApplicationGeneration();
    }

    public void restart(ApplicationId applicationId, HostFilter hostFilter) {
        hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter));
    }

    public HttpResponse filedistributionStatus(Tenant tenant, ApplicationId applicationId, Duration timeout) {
        Application application = getApplication(tenant, applicationId);
        return fileDistributionStatus.status(application, timeout);
    }

    public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) {
        Tenant tenant = tenantRepository.getTenant(tenantName);
        return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode);
    }

    private Application getApplication(Tenant tenant, ApplicationId applicationId) {
        long sessionId = getSessionIdForApplication(tenant, applicationId);
        RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId, 0);
        return session.ensureApplicationLoaded().getForVersionOrLatest(Optional.empty(), clock.instant());
    }

    public HttpResponse serviceConvergenceCheck(Tenant tenant, ApplicationId applicationId, String hostname, URI uri) {
        Application application = getApplication(tenant, applicationId);
        return convergeChecker.serviceConvergenceCheck(application, hostname, uri);
    }

    public HttpResponse serviceListToCheckForConfigConvergence(Tenant tenant, ApplicationId applicationId, URI uri) {
        Application application = getApplication(tenant, applicationId);
        return convergeChecker.serviceListToCheckForConfigConvergence(application, uri);
    }

    /**
     * Gets the active Session for the given application id.
     *
     * @return the active session, or null if there is no active session for the given application id.
     */
    public LocalSession getActiveSession(ApplicationId applicationId) {
        return getActiveSession(tenantRepository.getTenant(applicationId.tenant()), applicationId);
    }

    public long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) {
        return tenant.getApplicationRepo().getSessionIdForApplication(applicationId);
    }

    public void validateThatRemoteSessionIsNotActive(Tenant tenant, long sessionId) {
        RemoteSession session = getRemoteSession(tenant, sessionId);
        if (Session.Status.ACTIVATE.equals(session.getStatus())) {
            throw new IllegalStateException("Session is active: " + sessionId);
        }
    }

    public void validateThatRemoteSessionIsPrepared(Tenant tenant, long sessionId) {
        RemoteSession session = getRemoteSession(tenant, sessionId);
        if ( ! Session.Status.PREPARE.equals(session.getStatus()))
            throw new IllegalStateException("Session not prepared: " + sessionId);
    }

    public long createSessionFromExisting(ApplicationId applicationId, DeployLogger logger, TimeoutBudget timeoutBudget) {
        Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
        LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
        SessionFactory sessionFactory = tenant.getSessionFactory();
        LocalSession fromSession = getExistingSession(tenant, applicationId);
        LocalSession session = sessionFactory.createSessionFromExisting(fromSession, logger, timeoutBudget);
        localSessionRepo.addSession(session);
        return session.getSessionId();
    }

    public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) {
        File tempDir = Files.createTempDir();
        try {
            return createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir));
        } finally {
            // Fixed: run cleanup in finally so the temp dir is not leaked when session creation throws
            cleanupTempDirectory(tempDir);
        }
    }

    public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) {
        Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
        LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
        SessionFactory sessionFactory = tenant.getSessionFactory();
        LocalSession session = sessionFactory.createSession(applicationDirectory, applicationId, timeoutBudget);
        localSessionRepo.addSession(session);
        return session.getSessionId();
    }

    /** Deletes all tenants (except the default tenant) that have no active applications. */
    public Set<TenantName> removeUnusedTenants() {
        Set<TenantName> tenantsToBeDeleted = tenantRepository.getAllTenantNames().stream()
                .filter(tenantName -> activeApplications(tenantName).isEmpty())
                .filter(tenantName -> !tenantName.equals(TenantName.defaultName()))
                .collect(Collectors.toSet());
        tenantsToBeDeleted.forEach(tenantRepository::deleteTenant);
        return tenantsToBeDeleted;
    }

    public void deleteTenant(TenantName tenantName) {
        List<ApplicationId> activeApplications = activeApplications(tenantName);
        if (activeApplications.isEmpty())
            tenantRepository.deleteTenant(tenantName);
        else
            throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: "
                                               + activeApplications);
    }

    private List<ApplicationId> activeApplications(TenantName tenantName) {
        return tenantRepository.getTenant(tenantName).getApplicationRepo().listApplications();
    }

    public Tenant verifyTenantAndApplication(ApplicationId applicationId) {
        TenantName tenantName = applicationId.tenant();
        if (!tenantRepository.checkThatTenantExists(tenantName)) {
            throw new IllegalArgumentException("Tenant " + tenantName + " was not found.");
        }
        Tenant tenant = tenantRepository.getTenant(tenantName);
        List<ApplicationId> applicationIds = listApplicationIds(tenant);
        if (!applicationIds.contains(applicationId)) {
            throw new IllegalArgumentException("No such application id: " + applicationId);
        }
        return tenant;
    }

    public ApplicationMetaData getMetadataFromSession(Tenant tenant, long sessionId) {
        return getLocalSession(tenant, sessionId).getMetaData();
    }

    private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) {
        LocalSession session = getLocalSession(tenant, sessionId);
        if (Session.Status.ACTIVATE.equals(session.getStatus())) {
            throw new IllegalStateException("Session is active: " + sessionId);
        }
    }

    private LocalSession getLocalSession(Tenant tenant, long sessionId) {
        LocalSession session = tenant.getLocalSessionRepo().getSession(sessionId);
        if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");
        return session;
    }

    private RemoteSession getRemoteSession(Tenant tenant, long sessionId) {
        RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId);
        if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");
        return session;
    }

    private Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) {
        Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty();
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        try {
            long currentActiveSessionId = applicationRepo.getSessionIdForApplication(appId);
            RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId);
            if (currentActiveSession != null) {
                currentActiveApplicationSet = Optional.ofNullable(currentActiveSession.ensureApplicationLoaded());
            }
        } catch (IllegalArgumentException ignored) {
            // Intentional: no currently active session for this application means "empty", not an error
        }
        return currentActiveApplicationSet;
    }

    /**
     * Decompresses into tempDir. The caller owns tempDir and must delete it (in a
     * finally block) both on success and on failure; this method no longer deletes
     * it in the catch clause, which would have raced the callers' cleanup.
     */
    private File decompressApplication(CompressedApplicationInputStream in, File tempDir) {
        try {
            return in.decompress(tempDir);
        } catch (IOException e) {
            throw new IllegalArgumentException("Unable to decompress stream", e);
        }
    }

    private List<ApplicationId> listApplicationIds(Tenant tenant) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        return applicationRepo.listApplications();
    }

    private void cleanupTempDirectory(File tempDir) {
        logger.log(LogLevel.DEBUG, "Deleting tmp dir '" + tempDir + "'");
        if (!IOUtils.recursiveDeleteDir(tempDir)) {
            logger.log(LogLevel.WARNING, "Not able to delete tmp dir '" + tempDir + "'");
        }
    }

    void redeployAllApplications() throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(configserverConfig.numParallelTenantLoaders(),
                                                                new DaemonThreadFactory("redeploy apps"));
        // Submit all redeployments in parallel, then wait for each to finish
        Map<ApplicationId, Future<?>> futures = new HashMap<>();
        tenantRepository.getAllTenants()
                .forEach(tenant -> listApplicationIds(tenant)
                        .forEach(appId -> deployFromLocalActive(appId).ifPresent(
                                deployment -> futures.put(appId, executor.submit(deployment::activate)))));
        for (Map.Entry<ApplicationId, Future<?>> f : futures.entrySet()) {
            try {
                f.getValue().get();
            } catch (ExecutionException e) {
                throw new RuntimeException("Redeploying of " + f.getKey() + " failed", e);
            }
        }
        executor.shutdown();
        executor.awaitTermination(365, TimeUnit.DAYS);
    }

    private LocalSession getExistingSession(Tenant tenant, ApplicationId applicationId) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        return getLocalSession(tenant, applicationRepo.getSessionIdForApplication(applicationId));
    }

    private LocalSession getActiveSession(Tenant tenant, ApplicationId applicationId) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        if (applicationRepo.listApplications().contains(applicationId)) {
            return tenant.getLocalSessionRepo().getSession(applicationRepo.getSessionIdForApplication(applicationId));
        }
        return null;
    }

    private static void logConfigChangeActions(ConfigChangeActions actions, DeployLogger logger) {
        RestartActions restartActions = actions.getRestartActions();
        if ( ! restartActions.isEmpty()) {
            logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" +
                                      restartActions.format());
        }
        RefeedActions refeedActions = actions.getRefeedActions();
        if ( ! refeedActions.isEmpty()) {
            boolean allAllowed = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed);
            logger.log(allAllowed ? Level.INFO : Level.WARNING,
                       "Change(s) between active and new application that may require re-feed:\n" +
                       refeedActions.format());
        }
    }

    /** Returns version to use when deploying application in given environment */
    static Version decideVersion(ApplicationId application, Environment environment, Version targetVersion) {
        if (environment.isManuallyDeployed() && !"hosted-vespa".equals(application.tenant().value())) {
            return Vtag.currentVersion;
        }
        return targetVersion;
    }

    public Slime createDeployLog() {
        Slime deployLog = new Slime();
        deployLog.setObject();
        return deployLog;
    }

}
class ApplicationRepository implements com.yahoo.config.provision.Deployer { private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName()); private final TenantRepository tenantRepository; private final Optional<Provisioner> hostProvisioner; private final ApplicationConvergenceChecker convergeChecker; private final HttpProxy httpProxy; private final Clock clock; private final DeployLogger logger = new SilentDeployLogger(); private final ConfigserverConfig configserverConfig; private final Environment environment; private final FileDistributionStatus fileDistributionStatus; @Inject public ApplicationRepository(TenantRepository tenantRepository, HostProvisionerProvider hostProvisionerProvider, ApplicationConvergenceChecker applicationConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig) { this(tenantRepository, hostProvisionerProvider.getHostProvisioner(), applicationConvergenceChecker, httpProxy, configserverConfig, Clock.systemUTC(), new FileDistributionStatus()); } public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner, Clock clock) { this(tenantRepository, Optional.of(hostProvisioner), new ApplicationConvergenceChecker(), new HttpProxy(new SimpleHttpFetcher()), new ConfigserverConfig(new ConfigserverConfig.Builder()), clock, new FileDistributionStatus()); } private ApplicationRepository(TenantRepository tenantRepository, Optional<Provisioner> hostProvisioner, ApplicationConvergenceChecker applicationConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig, Clock clock, FileDistributionStatus fileDistributionStatus) { this.tenantRepository = tenantRepository; this.hostProvisioner = hostProvisioner; this.convergeChecker = applicationConvergenceChecker; this.httpProxy = httpProxy; this.clock = clock; this.configserverConfig = configserverConfig; this.environment = Environment.from(configserverConfig.environment()); this.fileDistributionStatus = 
fileDistributionStatus; } public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) { validateThatLocalSessionIsNotActive(tenant, sessionId); LocalSession session = getLocalSession(tenant, sessionId); ApplicationId applicationId = prepareParams.getApplicationId(); Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId); Slime deployLog = createDeployLog(); DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId); ConfigChangeActions actions = session.prepare(logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now); logConfigChangeActions(actions, logger); log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. "); return new PrepareResult(sessionId, actions, deployLog); } public PrepareResult prepareAndActivate(Tenant tenant, long sessionId, PrepareParams prepareParams, boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) { PrepareResult result = prepare(tenant, sessionId, prepareParams, now); activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreLockFailure, ignoreSessionStaleFailure); return result; } public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams) { return deploy(in, prepareParams, false, false, clock.instant()); } public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams, boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) { File tempDir = Files.createTempDir(); PrepareResult prepareResult; try { prepareResult = deploy(decompressApplication(in, tempDir), prepareParams, ignoreLockFailure, ignoreSessionStaleFailure, now); } finally { cleanupTempDirectory(tempDir); } return prepareResult; } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) { return 
deploy(applicationPackage, prepareParams, false, false, Instant.now()); } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); return prepareAndActivate(tenant, sessionId, prepareParams, ignoreLockFailure, ignoreSessionStaleFailure, now); } /** * Creates a new deployment from the active application, if available. * * @param application the active application to be redeployed * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) { return deployFromLocalActive(application, Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5))); } /** * Creates a new deployment from the active application, if available. 
* * @param application the active application to be redeployed * @param timeout the timeout to use for each individual deployment operation * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, Duration timeout) { Tenant tenant = tenantRepository.getTenant(application.tenant()); if (tenant == null) return Optional.empty(); LocalSession activeSession = getActiveSession(tenant, application); if (activeSession == null) return Optional.empty(); TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); LocalSession newSession = tenant.getSessionFactory().createSessionFromExisting(activeSession, logger, timeoutBudget); tenant.getLocalSessionRepo().addSession(newSession); Version version = decideVersion(application, environment, newSession.getVespaVersion()); return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock, false /* don't validate as this is already deployed */, version)); } public ApplicationId activate(Tenant tenant, long sessionId, TimeoutBudget timeoutBudget, boolean ignoreLockFailure, boolean ignoreSessionStaleFailure) { LocalSession localSession = getLocalSession(tenant, sessionId); Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft()); deployment.setIgnoreLockFailure(ignoreLockFailure); deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure); deployment.activate(); return localSession.getApplicationId(); } private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) { return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock); } /** * Removes a previously deployed application * * @return true if the application was found and 
removed, false if it was not present * @throws RuntimeException if the remove transaction fails. This method is exception safe. */ public boolean remove(ApplicationId applicationId) { Optional<Tenant> owner = Optional.ofNullable(tenantRepository.getTenant(applicationId.tenant())); if ( ! owner.isPresent()) return false; TenantApplications tenantApplications = owner.get().getApplicationRepo(); if ( ! tenantApplications.listApplications().contains(applicationId)) return false; long sessionId = tenantApplications.getSessionIdForApplication(applicationId); LocalSessionRepo localSessionRepo = owner.get().getLocalSessionRepo(); LocalSession session = localSessionRepo.getSession(sessionId); if (session == null) return false; NestedTransaction transaction = new NestedTransaction(); localSessionRepo.removeSession(session.getSessionId(), transaction); session.delete(transaction); transaction.add(new Rotations(owner.get().getCurator(), owner.get().getPath()).delete(applicationId)); transaction.add(tenantApplications.deleteApplication(applicationId)); hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId)); transaction.onCommitted(() -> log.log(LogLevel.INFO, "Deleted " + applicationId)); transaction.commit(); return true; } public HttpResponse clusterControllerStatusPage(Tenant tenant, ApplicationId applicationId, String hostName, String pathSuffix) { Application application = getApplication(tenant, applicationId); String relativePath = "clustercontroller-status/" + pathSuffix; return httpProxy.get(application, hostName, "container-clustercontroller", relativePath); } public Long getApplicationGeneration(Tenant tenant, ApplicationId applicationId) { return getApplication(tenant, applicationId).getApplicationGeneration(); } public void restart(ApplicationId applicationId, HostFilter hostFilter) { hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter)); } public HttpResponse filedistributionStatus(Tenant tenant, 
ApplicationId applicationId, Duration timeout) { Application application = getApplication(tenant, applicationId); return fileDistributionStatus.status(application, timeout); } public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) { Tenant tenant = tenantRepository.getTenant(tenantName); return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode); } private Application getApplication(Tenant tenant, ApplicationId applicationId) { long sessionId = getSessionIdForApplication(tenant, applicationId); RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId, 0); return session.ensureApplicationLoaded().getForVersionOrLatest(Optional.empty(), clock.instant()); } public HttpResponse serviceConvergenceCheck(Tenant tenant, ApplicationId applicationId, String hostname, URI uri) { Application application = getApplication(tenant, applicationId); return convergeChecker.serviceConvergenceCheck(application, hostname, uri); } public HttpResponse serviceListToCheckForConfigConvergence(Tenant tenant, ApplicationId applicationId, URI uri) { Application application = getApplication(tenant, applicationId); return convergeChecker.serviceListToCheckForConfigConvergence(application, uri); } /** * Gets the active Session for the given application id. * * @return the active session, or null if there is no active session for the given application id. 
*/ public LocalSession getActiveSession(ApplicationId applicationId) { return getActiveSession(tenantRepository.getTenant(applicationId.tenant()), applicationId); } public long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) { return tenant.getApplicationRepo().getSessionIdForApplication(applicationId); } public void validateThatRemoteSessionIsNotActive(Tenant tenant, long sessionId) { RemoteSession session = getRemoteSession(tenant, sessionId); if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } public void validateThatRemoteSessionIsPrepared(Tenant tenant, long sessionId) { RemoteSession session = getRemoteSession(tenant, sessionId); if (!Session.Status.PREPARE.equals(session.getStatus())) throw new IllegalStateException("Session not prepared: " + sessionId); } public long createSessionFromExisting(ApplicationId applicationId, DeployLogger logger, TimeoutBudget timeoutBudget) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo(); SessionFactory sessionFactory = tenant.getSessionFactory(); LocalSession fromSession = getExistingSession(tenant, applicationId); LocalSession session = sessionFactory.createSessionFromExisting(fromSession, logger, timeoutBudget); localSessionRepo.addSession(session); return session.getSessionId(); } public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) { File tempDir = Files.createTempDir(); long sessionId; try { sessionId = createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir)); } finally { cleanupTempDirectory(tempDir); } return sessionId; } public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); LocalSessionRepo localSessionRepo 
= tenant.getLocalSessionRepo(); SessionFactory sessionFactory = tenant.getSessionFactory(); LocalSession session = sessionFactory.createSession(applicationDirectory, applicationId, timeoutBudget); localSessionRepo.addSession(session); return session.getSessionId(); } public Set<TenantName> removeUnusedTenants() { Set<TenantName> tenantsToBeDeleted = tenantRepository.getAllTenantNames().stream() .filter(tenantName -> activeApplications(tenantName).isEmpty()) .filter(tenantName -> !tenantName.equals(TenantName.defaultName())) .collect(Collectors.toSet()); tenantsToBeDeleted.forEach(tenantRepository::deleteTenant); return tenantsToBeDeleted; } public void deleteTenant(TenantName tenantName) { List<ApplicationId> activeApplications = activeApplications(tenantName); if (activeApplications.isEmpty()) tenantRepository.deleteTenant(tenantName); else throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: " + activeApplications); } private List<ApplicationId> activeApplications(TenantName tenantName) { return tenantRepository.getTenant(tenantName).getApplicationRepo().listApplications(); } public Tenant verifyTenantAndApplication(ApplicationId applicationId) { TenantName tenantName = applicationId.tenant(); if (!tenantRepository.checkThatTenantExists(tenantName)) { throw new IllegalArgumentException("Tenant " + tenantName + " was not found."); } Tenant tenant = tenantRepository.getTenant(tenantName); List<ApplicationId> applicationIds = listApplicationIds(tenant); if (!applicationIds.contains(applicationId)) { throw new IllegalArgumentException("No such application id: " + applicationId); } return tenant; } public ApplicationMetaData getMetadataFromSession(Tenant tenant, long sessionId) { return getLocalSession(tenant, sessionId).getMetaData(); } private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) { LocalSession session = getLocalSession(tenant, sessionId); if 
(Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } private LocalSession getLocalSession(Tenant tenant, long sessionId) { LocalSession session = tenant.getLocalSessionRepo().getSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } private RemoteSession getRemoteSession(Tenant tenant, long sessionId) { RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } private Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) { Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty(); TenantApplications applicationRepo = tenant.getApplicationRepo(); try { long currentActiveSessionId = applicationRepo.getSessionIdForApplication(appId); RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId); if (currentActiveSession != null) { currentActiveApplicationSet = Optional.ofNullable(currentActiveSession.ensureApplicationLoaded()); } } catch (IllegalArgumentException e) { } return currentActiveApplicationSet; } private File decompressApplication(CompressedApplicationInputStream in, File tempDir) { try { return in.decompress(tempDir); } catch (IOException e) { throw new IllegalArgumentException("Unable to decompress stream", e); } } private List<ApplicationId> listApplicationIds(Tenant tenant) { TenantApplications applicationRepo = tenant.getApplicationRepo(); return applicationRepo.listApplications(); } private void cleanupTempDirectory(File tempDir) { logger.log(LogLevel.DEBUG, "Deleting tmp dir '" + tempDir + "'"); if (!IOUtils.recursiveDeleteDir(tempDir)) { logger.log(LogLevel.WARNING, "Not able to delete tmp dir '" + tempDir + "'"); } } void redeployAllApplications() throws InterruptedException { ExecutorService 
executor = Executors.newFixedThreadPool(configserverConfig.numParallelTenantLoaders(), new DaemonThreadFactory("redeploy apps")); Map<ApplicationId, Future<?>> futures = new HashMap<>(); tenantRepository.getAllTenants() .forEach(tenant -> listApplicationIds(tenant) .forEach(appId -> deployFromLocalActive(appId).ifPresent( deployment -> futures.put(appId,executor.submit(deployment::activate))))); for (Map.Entry<ApplicationId, Future<?>> f : futures.entrySet()) { try { f.getValue().get(); } catch (ExecutionException e) { throw new RuntimeException("Redeploying of " + f.getKey() + " failed", e); } } executor.shutdown(); executor.awaitTermination(365, TimeUnit.DAYS); } private LocalSession getExistingSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); return getLocalSession(tenant, applicationRepo.getSessionIdForApplication(applicationId)); } private LocalSession getActiveSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); if (applicationRepo.listApplications().contains(applicationId)) { return tenant.getLocalSessionRepo().getSession(applicationRepo.getSessionIdForApplication(applicationId)); } return null; } private static void logConfigChangeActions(ConfigChangeActions actions, DeployLogger logger) { RestartActions restartActions = actions.getRestartActions(); if ( ! restartActions.isEmpty()) { logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" + restartActions.format()); } RefeedActions refeedActions = actions.getRefeedActions(); if ( ! refeedActions.isEmpty()) { boolean allAllowed = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed); logger.log(allAllowed ? 
Level.INFO : Level.WARNING, "Change(s) between active and new application that may require re-feed:\n" + refeedActions.format()); } } /** Returns version to use when deploying application in given environment */ static Version decideVersion(ApplicationId application, Environment environment, Version targetVersion) { if (environment.isManuallyDeployed() && !"hosted-vespa".equals(application.tenant().value())) { return Vtag.currentVersion; } return targetVersion; } public Slime createDeployLog() { Slime deployLog = new Slime(); deployLog.setObject(); return deployLog; } }
No, then it will always delete it; it shouldn't delete it unless there is an exception (or once it no longer needs the temp dir). But this is a bit confusing and was what led to the bug in the first place, so I'll try to rewrite the code to make it clearer.
/**
 * Wraps the raw request body in a {@link CompressedApplicationInputStream} for the given content type
 * and unpacks it into tempDir.
 *
 * @param in          the compressed application package as received in the request body
 * @param contentType the content type of the body, used to pick the decompressor
 * @param tempDir     the directory to unpack into
 * @return the directory containing the decompressed application package
 * @throws IllegalArgumentException if the body cannot be decompressed; tempDir is deleted first,
 *         since nothing useful can be left in it at that point
 */
private File decompressApplication(InputStream in, String contentType, File tempDir) {
    try (CompressedApplicationInputStream compressed =
                 CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) {
        return decompressApplication(compressed, tempDir);
    } catch (IOException e) {
        // Decompression failed: remove the (partially filled) temp dir before propagating.
        cleanupTempDirectory(tempDir);
        throw new IllegalArgumentException("Unable to decompress data in body", e);
    }
}
cleanupTempDirectory(tempDir);
/**
 * Wraps the raw request body in a {@link CompressedApplicationInputStream} for the given content type
 * and unpacks it into tempDir.
 *
 * NOTE: the caller owns tempDir and is responsible for deleting it, also when this method throws.
 *
 * @param in          the compressed application package as received in the request body
 * @param contentType the content type of the body, used to pick the decompressor
 * @param tempDir     the directory to unpack into
 * @return the directory containing the decompressed application package
 * @throws IllegalArgumentException if the body cannot be decompressed
 */
private File decompressApplication(InputStream in, String contentType, File tempDir) {
    try (CompressedApplicationInputStream stream =
                 CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) {
        return decompressApplication(stream, tempDir);
    } catch (IOException e) {
        throw new IllegalArgumentException("Unable to decompress data in body", e);
    }
}
/**
 * The API for managing applications and their deployments on this config server:
 * creating, preparing and activating sessions, and removing applications and tenants again.
 *
 * Fix in this revision: the temp directories created by {@code Files.createTempDir()} in
 * {@link #deploy(CompressedApplicationInputStream, PrepareParams, boolean, boolean, Instant)} and
 * {@link #createSession(ApplicationId, TimeoutBudget, InputStream, String)} were only deleted on the
 * success path, leaking a directory whenever prepare/activate threw. Cleanup now happens in a
 * {@code finally}, and temp-dir ownership is entirely the creator's: decompression no longer deletes
 * the dir itself on failure.
 */
class ApplicationRepository implements com.yahoo.config.provision.Deployer {

    private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName());

    private final TenantRepository tenantRepository;
    private final Optional<Provisioner> hostProvisioner;
    private final ApplicationConvergenceChecker convergeChecker;
    private final HttpProxy httpProxy;
    private final Clock clock;
    private final DeployLogger logger = new SilentDeployLogger();
    private final ConfigserverConfig configserverConfig;
    private final Environment environment;
    private final FileDistributionStatus fileDistributionStatus;

    @Inject
    public ApplicationRepository(TenantRepository tenantRepository,
                                 HostProvisionerProvider hostProvisionerProvider,
                                 ApplicationConvergenceChecker applicationConvergenceChecker,
                                 HttpProxy httpProxy,
                                 ConfigserverConfig configserverConfig) {
        this(tenantRepository, hostProvisionerProvider.getHostProvisioner(), applicationConvergenceChecker,
             httpProxy, configserverConfig, Clock.systemUTC(), new FileDistributionStatus());
    }

    /** Convenience constructor (used by tests): defaults for checker, proxy and config. */
    public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner, Clock clock) {
        this(tenantRepository, Optional.of(hostProvisioner), new ApplicationConvergenceChecker(),
             new HttpProxy(new SimpleHttpFetcher()), new ConfigserverConfig(new ConfigserverConfig.Builder()),
             clock, new FileDistributionStatus());
    }

    private ApplicationRepository(TenantRepository tenantRepository,
                                  Optional<Provisioner> hostProvisioner,
                                  ApplicationConvergenceChecker applicationConvergenceChecker,
                                  HttpProxy httpProxy,
                                  ConfigserverConfig configserverConfig,
                                  Clock clock,
                                  FileDistributionStatus fileDistributionStatus) {
        this.tenantRepository = tenantRepository;
        this.hostProvisioner = hostProvisioner;
        this.convergeChecker = applicationConvergenceChecker;
        this.httpProxy = httpProxy;
        this.clock = clock;
        this.configserverConfig = configserverConfig;
        this.environment = Environment.from(configserverConfig.environment());
        this.fileDistributionStatus = fileDistributionStatus;
    }

    /**
     * Prepares the given (non-active) session: builds models, computes config change actions
     * and logs them to the returned deploy log.
     */
    public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) {
        validateThatLocalSessionIsNotActive(tenant, sessionId);
        LocalSession session = getLocalSession(tenant, sessionId);
        ApplicationId applicationId = prepareParams.getApplicationId();
        Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId);
        Slime deployLog = createDeployLog();
        DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId);
        ConfigChangeActions actions = session.prepare(logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now);
        logConfigChangeActions(actions, logger);
        log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. ");
        return new PrepareResult(sessionId, actions, deployLog);
    }

    /** Prepares and, if that succeeds, activates the given session. */
    public PrepareResult prepareAndActivate(Tenant tenant, long sessionId, PrepareParams prepareParams,
                                            boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) {
        PrepareResult result = prepare(tenant, sessionId, prepareParams, now);
        activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreLockFailure, ignoreSessionStaleFailure);
        return result;
    }

    public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams) {
        return deploy(in, prepareParams, false, false, clock.instant());
    }

    /**
     * Deploys a compressed application package: decompresses to a temp dir, then creates, prepares
     * and activates a session from it. The temp dir is always deleted, also when deployment fails.
     */
    public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams,
                                boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) {
        File tempDir = Files.createTempDir();
        try {
            return deploy(decompressApplication(in, tempDir), prepareParams, ignoreLockFailure, ignoreSessionStaleFailure, now);
        } finally {
            cleanupTempDirectory(tempDir); // delete also on failure, otherwise the dir leaks
        }
    }

    public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) {
        return deploy(applicationPackage, prepareParams, false, false, Instant.now());
    }

    /** Deploys an application package from a directory: creates, prepares and activates a session from it. */
    public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams,
                                boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) {
        ApplicationId applicationId = prepareParams.getApplicationId();
        long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage);
        Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
        return prepareAndActivate(tenant, sessionId, prepareParams, ignoreLockFailure, ignoreSessionStaleFailure, now);
    }

    /**
     * Creates a new deployment from the active application, if available.
     *
     * @param application the active application to be redeployed
     * @return a new deployment from the local active, or empty if a local active application
     *         was not present for this id (meaning it either is not active or active on another
     *         node in the config server cluster)
     */
    public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) {
        return deployFromLocalActive(application,
                                     Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5)));
    }

    /**
     * Creates a new deployment from the active application, if available.
     *
     * @param application the active application to be redeployed
     * @param timeout the timeout to use for each individual deployment operation
     * @return a new deployment from the local active, or empty if a local active application
     *         was not present for this id (meaning it either is not active or active on another
     *         node in the config server cluster)
     */
    @Override
    public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, Duration timeout) {
        Tenant tenant = tenantRepository.getTenant(application.tenant());
        if (tenant == null) return Optional.empty();
        LocalSession activeSession = getActiveSession(tenant, application);
        if (activeSession == null) return Optional.empty();
        TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout);
        LocalSession newSession = tenant.getSessionFactory().createSessionFromExisting(activeSession, logger, timeoutBudget);
        tenant.getLocalSessionRepo().addSession(newSession);
        Version version = decideVersion(application, environment, newSession.getVespaVersion());
        return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock,
                                                 false /* don't validate as this is already deployed */, version));
    }

    /** Activates a previously prepared session. Returns the application id of the activated session. */
    public ApplicationId activate(Tenant tenant, long sessionId, TimeoutBudget timeoutBudget,
                                  boolean ignoreLockFailure, boolean ignoreSessionStaleFailure) {
        LocalSession localSession = getLocalSession(tenant, sessionId);
        Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft());
        deployment.setIgnoreLockFailure(ignoreLockFailure);
        deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure);
        deployment.activate();
        return localSession.getApplicationId();
    }

    private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) {
        return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock);
    }

    /**
     * Removes a previously deployed application.
     *
     * @return true if the application was found and removed, false if it was not present
     * @throws RuntimeException if the remove transaction fails. This method is exception safe.
     */
    public boolean remove(ApplicationId applicationId) {
        Optional<Tenant> owner = Optional.ofNullable(tenantRepository.getTenant(applicationId.tenant()));
        if ( ! owner.isPresent()) return false;

        TenantApplications tenantApplications = owner.get().getApplicationRepo();
        if ( ! tenantApplications.listApplications().contains(applicationId)) return false;

        long sessionId = tenantApplications.getSessionIdForApplication(applicationId);
        LocalSessionRepo localSessionRepo = owner.get().getLocalSessionRepo();
        LocalSession session = localSessionRepo.getSession(sessionId);
        if (session == null) return false;

        // All removal steps are collected in one transaction so they either all happen or none do.
        NestedTransaction transaction = new NestedTransaction();
        localSessionRepo.removeSession(session.getSessionId(), transaction);
        session.delete(transaction);
        transaction.add(new Rotations(owner.get().getCurator(), owner.get().getPath()).delete(applicationId));
        transaction.add(tenantApplications.deleteApplication(applicationId));
        hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId));
        transaction.onCommitted(() -> log.log(LogLevel.INFO, "Deleted " + applicationId));
        transaction.commit();
        return true;
    }

    /** Proxies a cluster controller status page request to the given host. */
    public HttpResponse clusterControllerStatusPage(Tenant tenant, ApplicationId applicationId, String hostName, String pathSuffix) {
        Application application = getApplication(tenant, applicationId);
        String relativePath = "clustercontroller-status/" + pathSuffix;
        return httpProxy.get(application, hostName, "container-clustercontroller", relativePath);
    }

    public Long getApplicationGeneration(Tenant tenant, ApplicationId applicationId) {
        return getApplication(tenant, applicationId).getApplicationGeneration();
    }

    public void restart(ApplicationId applicationId, HostFilter hostFilter) {
        hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter));
    }

    public HttpResponse filedistributionStatus(Tenant tenant, ApplicationId applicationId, Duration timeout) {
        Application application = getApplication(tenant, applicationId);
        return fileDistributionStatus.status(application, timeout);
    }

    public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) {
        Tenant tenant = tenantRepository.getTenant(tenantName);
        return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode);
    }

    private Application getApplication(Tenant tenant, ApplicationId applicationId) {
        long sessionId = getSessionIdForApplication(tenant, applicationId);
        RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId, 0);
        return session.ensureApplicationLoaded().getForVersionOrLatest(Optional.empty(), clock.instant());
    }

    public HttpResponse serviceConvergenceCheck(Tenant tenant, ApplicationId applicationId, String hostname, URI uri) {
        Application application = getApplication(tenant, applicationId);
        return convergeChecker.serviceConvergenceCheck(application, hostname, uri);
    }

    public HttpResponse serviceListToCheckForConfigConvergence(Tenant tenant, ApplicationId applicationId, URI uri) {
        Application application = getApplication(tenant, applicationId);
        return convergeChecker.serviceListToCheckForConfigConvergence(application, uri);
    }

    /**
     * Gets the active Session for the given application id.
     *
     * @return the active session, or null if there is no active session for the given application id.
     */
    public LocalSession getActiveSession(ApplicationId applicationId) {
        return getActiveSession(tenantRepository.getTenant(applicationId.tenant()), applicationId);
    }

    public long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) {
        return tenant.getApplicationRepo().getSessionIdForApplication(applicationId);
    }

    public void validateThatRemoteSessionIsNotActive(Tenant tenant, long sessionId) {
        RemoteSession session = getRemoteSession(tenant, sessionId);
        if (Session.Status.ACTIVATE.equals(session.getStatus()))
            throw new IllegalStateException("Session is active: " + sessionId);
    }

    public void validateThatRemoteSessionIsPrepared(Tenant tenant, long sessionId) {
        RemoteSession session = getRemoteSession(tenant, sessionId);
        if (!Session.Status.PREPARE.equals(session.getStatus()))
            throw new IllegalStateException("Session not prepared: " + sessionId);
    }

    /** Creates a new (non-activated) session based on the currently active session of the application. */
    public long createSessionFromExisting(ApplicationId applicationId, DeployLogger logger, TimeoutBudget timeoutBudget) {
        Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
        LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
        SessionFactory sessionFactory = tenant.getSessionFactory();
        LocalSession fromSession = getExistingSession(tenant, applicationId);
        LocalSession session = sessionFactory.createSessionFromExisting(fromSession, logger, timeoutBudget);
        localSessionRepo.addSession(session);
        return session.getSessionId();
    }

    /**
     * Creates a new session from a compressed application package stream.
     * The temp dir used for decompression is always deleted, also when session creation fails.
     */
    public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) {
        File tempDir = Files.createTempDir();
        try {
            return createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir));
        } finally {
            cleanupTempDirectory(tempDir); // delete also on failure, otherwise the dir leaks
        }
    }

    /** Creates a new session from an application package directory. */
    public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) {
        Tenant tenant = tenantRepository.getTenant(applicationId.tenant());
        LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo();
        SessionFactory sessionFactory = tenant.getSessionFactory();
        LocalSession session = sessionFactory.createSession(applicationDirectory, applicationId, timeoutBudget);
        localSessionRepo.addSession(session);
        return session.getSessionId();
    }

    /** Deletes all tenants (except the default one) that have no active applications. Returns the deleted tenants. */
    public Set<TenantName> removeUnusedTenants() {
        Set<TenantName> tenantsToBeDeleted = tenantRepository.getAllTenantNames().stream()
                .filter(tenantName -> activeApplications(tenantName).isEmpty())
                .filter(tenantName -> !tenantName.equals(TenantName.defaultName()))
                .collect(Collectors.toSet());
        tenantsToBeDeleted.forEach(tenantRepository::deleteTenant);
        return tenantsToBeDeleted;
    }

    public void deleteTenant(TenantName tenantName) {
        List<ApplicationId> activeApplications = activeApplications(tenantName);
        if (activeApplications.isEmpty())
            tenantRepository.deleteTenant(tenantName);
        else
            throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: " + activeApplications);
    }

    private List<ApplicationId> activeApplications(TenantName tenantName) {
        return tenantRepository.getTenant(tenantName).getApplicationRepo().listApplications();
    }

    /** Verifies that the tenant and application exist, returning the tenant. Throws otherwise. */
    public Tenant verifyTenantAndApplication(ApplicationId applicationId) {
        TenantName tenantName = applicationId.tenant();
        if (!tenantRepository.checkThatTenantExists(tenantName)) {
            throw new IllegalArgumentException("Tenant " + tenantName + " was not found.");
        }
        Tenant tenant = tenantRepository.getTenant(tenantName);
        List<ApplicationId> applicationIds = listApplicationIds(tenant);
        if (!applicationIds.contains(applicationId)) {
            throw new IllegalArgumentException("No such application id: " + applicationId);
        }
        return tenant;
    }

    public ApplicationMetaData getMetadataFromSession(Tenant tenant, long sessionId) {
        return getLocalSession(tenant, sessionId).getMetaData();
    }

    private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) {
        LocalSession session = getLocalSession(tenant, sessionId);
        if (Session.Status.ACTIVATE.equals(session.getStatus()))
            throw new IllegalStateException("Session is active: " + sessionId);
    }

    private LocalSession getLocalSession(Tenant tenant, long sessionId) {
        LocalSession session = tenant.getLocalSessionRepo().getSession(sessionId);
        if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");
        return session;
    }

    private RemoteSession getRemoteSession(Tenant tenant, long sessionId) {
        RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId);
        if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");
        return session;
    }

    private Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) {
        Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty();
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        try {
            long currentActiveSessionId = applicationRepo.getSessionIdForApplication(appId);
            RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId);
            if (currentActiveSession != null) {
                currentActiveApplicationSet = Optional.ofNullable(currentActiveSession.ensureApplicationLoaded());
            }
        } catch (IllegalArgumentException e) {
            // Do nothing: there is no active session for this application — return empty.
        }
        return currentActiveApplicationSet;
    }

    /**
     * Decompresses the application package into tempDir.
     *
     * NOTE: the caller owns tempDir and must delete it (in a finally) also when this throws.
     */
    private File decompressApplication(CompressedApplicationInputStream in, File tempDir) {
        try {
            return in.decompress(tempDir);
        } catch (IOException e) {
            throw new IllegalArgumentException("Unable to decompress stream", e);
        }
    }

    private List<ApplicationId> listApplicationIds(Tenant tenant) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        return applicationRepo.listApplications();
    }

    /** Best-effort deletion of a temp dir: logs a warning instead of throwing if it cannot be removed. */
    private void cleanupTempDirectory(File tempDir) {
        logger.log(LogLevel.DEBUG, "Deleting tmp dir '" + tempDir + "'");
        if (!IOUtils.recursiveDeleteDir(tempDir)) {
            logger.log(LogLevel.WARNING, "Not able to delete tmp dir '" + tempDir + "'");
        }
    }

    /** Redeploys all applications of all tenants in parallel; blocks until all are done (or one fails). */
    void redeployAllApplications() throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(configserverConfig.numParallelTenantLoaders(),
                                                                new DaemonThreadFactory("redeploy apps"));
        // Keep track of deployment per application so failures can name the application that failed.
        Map<ApplicationId, Future<?>> futures = new HashMap<>();
        tenantRepository.getAllTenants()
                .forEach(tenant -> listApplicationIds(tenant)
                        .forEach(appId -> deployFromLocalActive(appId).ifPresent(
                                deployment -> futures.put(appId, executor.submit(deployment::activate)))));
        for (Map.Entry<ApplicationId, Future<?>> f : futures.entrySet()) {
            try {
                f.getValue().get();
            } catch (ExecutionException e) {
                throw new RuntimeException("Redeploying of " + f.getKey() + " failed", e);
            }
        }
        executor.shutdown();
        executor.awaitTermination(365, TimeUnit.DAYS); // Timeout should never happen
    }

    private LocalSession getExistingSession(Tenant tenant, ApplicationId applicationId) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        return getLocalSession(tenant, applicationRepo.getSessionIdForApplication(applicationId));
    }

    private LocalSession getActiveSession(Tenant tenant, ApplicationId applicationId) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        if (applicationRepo.listApplications().contains(applicationId)) {
            return tenant.getLocalSessionRepo().getSession(applicationRepo.getSessionIdForApplication(applicationId));
        }
        return null;
    }

    private static void logConfigChangeActions(ConfigChangeActions actions, DeployLogger logger) {
        RestartActions restartActions = actions.getRestartActions();
        if ( ! restartActions.isEmpty()) {
            logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" +
                                      restartActions.format());
        }
        RefeedActions refeedActions = actions.getRefeedActions();
        if ( ! refeedActions.isEmpty()) {
            boolean allAllowed = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed);
            logger.log(allAllowed ? Level.INFO : Level.WARNING,
                       "Change(s) between active and new application that may require re-feed:\n" +
                       refeedActions.format());
        }
    }

    /** Returns version to use when deploying application in given environment */
    static Version decideVersion(ApplicationId application, Environment environment, Version targetVersion) {
        if (environment.isManuallyDeployed() && !"hosted-vespa".equals(application.tenant().value())) {
            // Manually deployed applications are always deployed on the config server's own version
            return Vtag.currentVersion;
        }
        return targetVersion;
    }

    public Slime createDeployLog() {
        Slime deployLog = new Slime();
        deployLog.setObject();
        return deployLog;
    }

}
class ApplicationRepository implements com.yahoo.config.provision.Deployer { private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName()); private final TenantRepository tenantRepository; private final Optional<Provisioner> hostProvisioner; private final ApplicationConvergenceChecker convergeChecker; private final HttpProxy httpProxy; private final Clock clock; private final DeployLogger logger = new SilentDeployLogger(); private final ConfigserverConfig configserverConfig; private final Environment environment; private final FileDistributionStatus fileDistributionStatus; @Inject public ApplicationRepository(TenantRepository tenantRepository, HostProvisionerProvider hostProvisionerProvider, ApplicationConvergenceChecker applicationConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig) { this(tenantRepository, hostProvisionerProvider.getHostProvisioner(), applicationConvergenceChecker, httpProxy, configserverConfig, Clock.systemUTC(), new FileDistributionStatus()); } public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner, Clock clock) { this(tenantRepository, Optional.of(hostProvisioner), new ApplicationConvergenceChecker(), new HttpProxy(new SimpleHttpFetcher()), new ConfigserverConfig(new ConfigserverConfig.Builder()), clock, new FileDistributionStatus()); } private ApplicationRepository(TenantRepository tenantRepository, Optional<Provisioner> hostProvisioner, ApplicationConvergenceChecker applicationConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig, Clock clock, FileDistributionStatus fileDistributionStatus) { this.tenantRepository = tenantRepository; this.hostProvisioner = hostProvisioner; this.convergeChecker = applicationConvergenceChecker; this.httpProxy = httpProxy; this.clock = clock; this.configserverConfig = configserverConfig; this.environment = Environment.from(configserverConfig.environment()); this.fileDistributionStatus = 
fileDistributionStatus; } public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) { validateThatLocalSessionIsNotActive(tenant, sessionId); LocalSession session = getLocalSession(tenant, sessionId); ApplicationId applicationId = prepareParams.getApplicationId(); Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId); Slime deployLog = createDeployLog(); DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId); ConfigChangeActions actions = session.prepare(logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now); logConfigChangeActions(actions, logger); log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. "); return new PrepareResult(sessionId, actions, deployLog); } public PrepareResult prepareAndActivate(Tenant tenant, long sessionId, PrepareParams prepareParams, boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) { PrepareResult result = prepare(tenant, sessionId, prepareParams, now); activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreLockFailure, ignoreSessionStaleFailure); return result; } public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams) { return deploy(in, prepareParams, false, false, clock.instant()); } public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams, boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) { File tempDir = Files.createTempDir(); PrepareResult prepareResult; try { prepareResult = deploy(decompressApplication(in, tempDir), prepareParams, ignoreLockFailure, ignoreSessionStaleFailure, now); } finally { cleanupTempDirectory(tempDir); } return prepareResult; } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) { return 
deploy(applicationPackage, prepareParams, false, false, Instant.now()); } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); return prepareAndActivate(tenant, sessionId, prepareParams, ignoreLockFailure, ignoreSessionStaleFailure, now); } /** * Creates a new deployment from the active application, if available. * * @param application the active application to be redeployed * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) { return deployFromLocalActive(application, Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5))); } /** * Creates a new deployment from the active application, if available. 
* * @param application the active application to be redeployed * @param timeout the timeout to use for each individual deployment operation * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, Duration timeout) { Tenant tenant = tenantRepository.getTenant(application.tenant()); if (tenant == null) return Optional.empty(); LocalSession activeSession = getActiveSession(tenant, application); if (activeSession == null) return Optional.empty(); TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); LocalSession newSession = tenant.getSessionFactory().createSessionFromExisting(activeSession, logger, timeoutBudget); tenant.getLocalSessionRepo().addSession(newSession); Version version = decideVersion(application, environment, newSession.getVespaVersion()); return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock, false /* don't validate as this is already deployed */, version)); } public ApplicationId activate(Tenant tenant, long sessionId, TimeoutBudget timeoutBudget, boolean ignoreLockFailure, boolean ignoreSessionStaleFailure) { LocalSession localSession = getLocalSession(tenant, sessionId); Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft()); deployment.setIgnoreLockFailure(ignoreLockFailure); deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure); deployment.activate(); return localSession.getApplicationId(); } private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) { return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock); } /** * Removes a previously deployed application * * @return true if the application was found and 
removed, false if it was not present * @throws RuntimeException if the remove transaction fails. This method is exception safe. */ public boolean remove(ApplicationId applicationId) { Optional<Tenant> owner = Optional.ofNullable(tenantRepository.getTenant(applicationId.tenant())); if ( ! owner.isPresent()) return false; TenantApplications tenantApplications = owner.get().getApplicationRepo(); if ( ! tenantApplications.listApplications().contains(applicationId)) return false; long sessionId = tenantApplications.getSessionIdForApplication(applicationId); LocalSessionRepo localSessionRepo = owner.get().getLocalSessionRepo(); LocalSession session = localSessionRepo.getSession(sessionId); if (session == null) return false; NestedTransaction transaction = new NestedTransaction(); localSessionRepo.removeSession(session.getSessionId(), transaction); session.delete(transaction); transaction.add(new Rotations(owner.get().getCurator(), owner.get().getPath()).delete(applicationId)); transaction.add(tenantApplications.deleteApplication(applicationId)); hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId)); transaction.onCommitted(() -> log.log(LogLevel.INFO, "Deleted " + applicationId)); transaction.commit(); return true; } public HttpResponse clusterControllerStatusPage(Tenant tenant, ApplicationId applicationId, String hostName, String pathSuffix) { Application application = getApplication(tenant, applicationId); String relativePath = "clustercontroller-status/" + pathSuffix; return httpProxy.get(application, hostName, "container-clustercontroller", relativePath); } public Long getApplicationGeneration(Tenant tenant, ApplicationId applicationId) { return getApplication(tenant, applicationId).getApplicationGeneration(); } public void restart(ApplicationId applicationId, HostFilter hostFilter) { hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter)); } public HttpResponse filedistributionStatus(Tenant tenant, 
ApplicationId applicationId, Duration timeout) { Application application = getApplication(tenant, applicationId); return fileDistributionStatus.status(application, timeout); } public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) { Tenant tenant = tenantRepository.getTenant(tenantName); return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode); } private Application getApplication(Tenant tenant, ApplicationId applicationId) { long sessionId = getSessionIdForApplication(tenant, applicationId); RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId, 0); return session.ensureApplicationLoaded().getForVersionOrLatest(Optional.empty(), clock.instant()); } public HttpResponse serviceConvergenceCheck(Tenant tenant, ApplicationId applicationId, String hostname, URI uri) { Application application = getApplication(tenant, applicationId); return convergeChecker.serviceConvergenceCheck(application, hostname, uri); } public HttpResponse serviceListToCheckForConfigConvergence(Tenant tenant, ApplicationId applicationId, URI uri) { Application application = getApplication(tenant, applicationId); return convergeChecker.serviceListToCheckForConfigConvergence(application, uri); } /** * Gets the active Session for the given application id. * * @return the active session, or null if there is no active session for the given application id. 
*/ public LocalSession getActiveSession(ApplicationId applicationId) { return getActiveSession(tenantRepository.getTenant(applicationId.tenant()), applicationId); } public long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) { return tenant.getApplicationRepo().getSessionIdForApplication(applicationId); } public void validateThatRemoteSessionIsNotActive(Tenant tenant, long sessionId) { RemoteSession session = getRemoteSession(tenant, sessionId); if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } public void validateThatRemoteSessionIsPrepared(Tenant tenant, long sessionId) { RemoteSession session = getRemoteSession(tenant, sessionId); if (!Session.Status.PREPARE.equals(session.getStatus())) throw new IllegalStateException("Session not prepared: " + sessionId); } public long createSessionFromExisting(ApplicationId applicationId, DeployLogger logger, TimeoutBudget timeoutBudget) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo(); SessionFactory sessionFactory = tenant.getSessionFactory(); LocalSession fromSession = getExistingSession(tenant, applicationId); LocalSession session = sessionFactory.createSessionFromExisting(fromSession, logger, timeoutBudget); localSessionRepo.addSession(session); return session.getSessionId(); } public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) { File tempDir = Files.createTempDir(); long sessionId; try { sessionId = createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir)); } finally { cleanupTempDirectory(tempDir); } return sessionId; } public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); LocalSessionRepo localSessionRepo 
= tenant.getLocalSessionRepo(); SessionFactory sessionFactory = tenant.getSessionFactory(); LocalSession session = sessionFactory.createSession(applicationDirectory, applicationId, timeoutBudget); localSessionRepo.addSession(session); return session.getSessionId(); } public Set<TenantName> removeUnusedTenants() { Set<TenantName> tenantsToBeDeleted = tenantRepository.getAllTenantNames().stream() .filter(tenantName -> activeApplications(tenantName).isEmpty()) .filter(tenantName -> !tenantName.equals(TenantName.defaultName())) .collect(Collectors.toSet()); tenantsToBeDeleted.forEach(tenantRepository::deleteTenant); return tenantsToBeDeleted; } public void deleteTenant(TenantName tenantName) { List<ApplicationId> activeApplications = activeApplications(tenantName); if (activeApplications.isEmpty()) tenantRepository.deleteTenant(tenantName); else throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: " + activeApplications); } private List<ApplicationId> activeApplications(TenantName tenantName) { return tenantRepository.getTenant(tenantName).getApplicationRepo().listApplications(); } public Tenant verifyTenantAndApplication(ApplicationId applicationId) { TenantName tenantName = applicationId.tenant(); if (!tenantRepository.checkThatTenantExists(tenantName)) { throw new IllegalArgumentException("Tenant " + tenantName + " was not found."); } Tenant tenant = tenantRepository.getTenant(tenantName); List<ApplicationId> applicationIds = listApplicationIds(tenant); if (!applicationIds.contains(applicationId)) { throw new IllegalArgumentException("No such application id: " + applicationId); } return tenant; } public ApplicationMetaData getMetadataFromSession(Tenant tenant, long sessionId) { return getLocalSession(tenant, sessionId).getMetaData(); } private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) { LocalSession session = getLocalSession(tenant, sessionId); if 
(Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } private LocalSession getLocalSession(Tenant tenant, long sessionId) { LocalSession session = tenant.getLocalSessionRepo().getSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } private RemoteSession getRemoteSession(Tenant tenant, long sessionId) { RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } private Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) { Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty(); TenantApplications applicationRepo = tenant.getApplicationRepo(); try { long currentActiveSessionId = applicationRepo.getSessionIdForApplication(appId); RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId); if (currentActiveSession != null) { currentActiveApplicationSet = Optional.ofNullable(currentActiveSession.ensureApplicationLoaded()); } } catch (IllegalArgumentException e) { } return currentActiveApplicationSet; } private File decompressApplication(CompressedApplicationInputStream in, File tempDir) { try { return in.decompress(tempDir); } catch (IOException e) { throw new IllegalArgumentException("Unable to decompress stream", e); } } private List<ApplicationId> listApplicationIds(Tenant tenant) { TenantApplications applicationRepo = tenant.getApplicationRepo(); return applicationRepo.listApplications(); } private void cleanupTempDirectory(File tempDir) { logger.log(LogLevel.DEBUG, "Deleting tmp dir '" + tempDir + "'"); if (!IOUtils.recursiveDeleteDir(tempDir)) { logger.log(LogLevel.WARNING, "Not able to delete tmp dir '" + tempDir + "'"); } } void redeployAllApplications() throws InterruptedException { ExecutorService 
executor = Executors.newFixedThreadPool(configserverConfig.numParallelTenantLoaders(), new DaemonThreadFactory("redeploy apps")); Map<ApplicationId, Future<?>> futures = new HashMap<>(); tenantRepository.getAllTenants() .forEach(tenant -> listApplicationIds(tenant) .forEach(appId -> deployFromLocalActive(appId).ifPresent( deployment -> futures.put(appId,executor.submit(deployment::activate))))); for (Map.Entry<ApplicationId, Future<?>> f : futures.entrySet()) { try { f.getValue().get(); } catch (ExecutionException e) { throw new RuntimeException("Redeploying of " + f.getKey() + " failed", e); } } executor.shutdown(); executor.awaitTermination(365, TimeUnit.DAYS); } private LocalSession getExistingSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); return getLocalSession(tenant, applicationRepo.getSessionIdForApplication(applicationId)); } private LocalSession getActiveSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); if (applicationRepo.listApplications().contains(applicationId)) { return tenant.getLocalSessionRepo().getSession(applicationRepo.getSessionIdForApplication(applicationId)); } return null; } private static void logConfigChangeActions(ConfigChangeActions actions, DeployLogger logger) { RestartActions restartActions = actions.getRestartActions(); if ( ! restartActions.isEmpty()) { logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" + restartActions.format()); } RefeedActions refeedActions = actions.getRefeedActions(); if ( ! refeedActions.isEmpty()) { boolean allAllowed = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed); logger.log(allAllowed ? 
Level.INFO : Level.WARNING, "Change(s) between active and new application that may require re-feed:\n" + refeedActions.format()); } } /** Returns version to use when deploying application in given environment */ static Version decideVersion(ApplicationId application, Environment environment, Version targetVersion) { if (environment.isManuallyDeployed() && !"hosted-vespa".equals(application.tenant().value())) { return Vtag.currentVersion; } return targetVersion; } public Slime createDeployLog() { Slime deployLog = new Slime(); deployLog.setObject(); return deployLog; } }
I prefer to set this to a large value and instead just run the `maintain()` manually, then you won't need the `sleep()`
public void deleteTenantWithNoApplications() { Curator curator = new MockCurator(); GlobalComponentRegistry componentRegistry = new TestComponentRegistry.Builder().curator(curator).build(); TenantRepository tenantRepository = new TenantRepository(componentRegistry, false); ApplicationRepository applicationRepository = new ApplicationRepository(tenantRepository, new SessionHandlerTest.MockProvisioner(), Clock.systemUTC()); TenantName shouldBeDeleted = TenantName.from("to-be-deleted"); TenantName shouldNotBeDeleted = TenantName.from("should-not-be-deleted"); tenantRepository.addTenant(shouldBeDeleted); tenantRepository.addTenant(shouldNotBeDeleted); applicationRepository.deploy(new File("src/test/apps/app"), new PrepareParams.Builder() .applicationId(ApplicationId.from(shouldNotBeDeleted, ApplicationName.from("foo"), InstanceName.defaultName())) .build()); assertNotNull(tenantRepository.getTenant(shouldBeDeleted)); assertNotNull(tenantRepository.getTenant(shouldNotBeDeleted)); new TenantsMaintainer(applicationRepository, curator, Duration.ofMillis(10)); try { Thread.sleep(1000); } catch (InterruptedException e) { /* ignore */ } assertNull(tenantRepository.getTenant(shouldBeDeleted)); assertNotNull(tenantRepository.getTenant(shouldNotBeDeleted)); }
new TenantsMaintainer(applicationRepository, curator, Duration.ofMillis(10));
public void deleteTenantWithNoApplications() { Curator curator = new MockCurator(); GlobalComponentRegistry componentRegistry = new TestComponentRegistry.Builder().curator(curator).build(); TenantRepository tenantRepository = new TenantRepository(componentRegistry, false); ApplicationRepository applicationRepository = new ApplicationRepository(tenantRepository, new SessionHandlerTest.MockProvisioner(), Clock.systemUTC()); TenantName shouldBeDeleted = TenantName.from("to-be-deleted"); TenantName shouldNotBeDeleted = TenantName.from("should-not-be-deleted"); tenantRepository.addTenant(shouldBeDeleted); tenantRepository.addTenant(shouldNotBeDeleted); applicationRepository.deploy(new File("src/test/apps/app"), new PrepareParams.Builder() .applicationId(ApplicationId.from(shouldNotBeDeleted, ApplicationName.from("foo"), InstanceName.defaultName())) .build()); assertNotNull(tenantRepository.getTenant(shouldBeDeleted)); assertNotNull(tenantRepository.getTenant(shouldNotBeDeleted)); new TenantsMaintainer(applicationRepository, curator, Duration.ofDays(1)).run(); assertNull(tenantRepository.getTenant(shouldBeDeleted)); assertNotNull(tenantRepository.getTenant(shouldNotBeDeleted)); }
class TenantsMaintainerTest { @Test }
class TenantsMaintainerTest { @Test }
Yeah, good point, that's better, will update
public void deleteTenantWithNoApplications() { Curator curator = new MockCurator(); GlobalComponentRegistry componentRegistry = new TestComponentRegistry.Builder().curator(curator).build(); TenantRepository tenantRepository = new TenantRepository(componentRegistry, false); ApplicationRepository applicationRepository = new ApplicationRepository(tenantRepository, new SessionHandlerTest.MockProvisioner(), Clock.systemUTC()); TenantName shouldBeDeleted = TenantName.from("to-be-deleted"); TenantName shouldNotBeDeleted = TenantName.from("should-not-be-deleted"); tenantRepository.addTenant(shouldBeDeleted); tenantRepository.addTenant(shouldNotBeDeleted); applicationRepository.deploy(new File("src/test/apps/app"), new PrepareParams.Builder() .applicationId(ApplicationId.from(shouldNotBeDeleted, ApplicationName.from("foo"), InstanceName.defaultName())) .build()); assertNotNull(tenantRepository.getTenant(shouldBeDeleted)); assertNotNull(tenantRepository.getTenant(shouldNotBeDeleted)); new TenantsMaintainer(applicationRepository, curator, Duration.ofMillis(10)); try { Thread.sleep(1000); } catch (InterruptedException e) { /* ignore */ } assertNull(tenantRepository.getTenant(shouldBeDeleted)); assertNotNull(tenantRepository.getTenant(shouldNotBeDeleted)); }
new TenantsMaintainer(applicationRepository, curator, Duration.ofMillis(10));
public void deleteTenantWithNoApplications() { Curator curator = new MockCurator(); GlobalComponentRegistry componentRegistry = new TestComponentRegistry.Builder().curator(curator).build(); TenantRepository tenantRepository = new TenantRepository(componentRegistry, false); ApplicationRepository applicationRepository = new ApplicationRepository(tenantRepository, new SessionHandlerTest.MockProvisioner(), Clock.systemUTC()); TenantName shouldBeDeleted = TenantName.from("to-be-deleted"); TenantName shouldNotBeDeleted = TenantName.from("should-not-be-deleted"); tenantRepository.addTenant(shouldBeDeleted); tenantRepository.addTenant(shouldNotBeDeleted); applicationRepository.deploy(new File("src/test/apps/app"), new PrepareParams.Builder() .applicationId(ApplicationId.from(shouldNotBeDeleted, ApplicationName.from("foo"), InstanceName.defaultName())) .build()); assertNotNull(tenantRepository.getTenant(shouldBeDeleted)); assertNotNull(tenantRepository.getTenant(shouldNotBeDeleted)); new TenantsMaintainer(applicationRepository, curator, Duration.ofDays(1)).run(); assertNull(tenantRepository.getTenant(shouldBeDeleted)); assertNotNull(tenantRepository.getTenant(shouldNotBeDeleted)); }
class TenantsMaintainerTest { @Test }
class TenantsMaintainerTest { @Test }
Agree.
public void deleteTenantWithNoApplications() { Curator curator = new MockCurator(); GlobalComponentRegistry componentRegistry = new TestComponentRegistry.Builder().curator(curator).build(); TenantRepository tenantRepository = new TenantRepository(componentRegistry, false); ApplicationRepository applicationRepository = new ApplicationRepository(tenantRepository, new SessionHandlerTest.MockProvisioner(), Clock.systemUTC()); TenantName shouldBeDeleted = TenantName.from("to-be-deleted"); TenantName shouldNotBeDeleted = TenantName.from("should-not-be-deleted"); tenantRepository.addTenant(shouldBeDeleted); tenantRepository.addTenant(shouldNotBeDeleted); applicationRepository.deploy(new File("src/test/apps/app"), new PrepareParams.Builder() .applicationId(ApplicationId.from(shouldNotBeDeleted, ApplicationName.from("foo"), InstanceName.defaultName())) .build()); assertNotNull(tenantRepository.getTenant(shouldBeDeleted)); assertNotNull(tenantRepository.getTenant(shouldNotBeDeleted)); new TenantsMaintainer(applicationRepository, curator, Duration.ofMillis(10)); try { Thread.sleep(1000); } catch (InterruptedException e) { /* ignore */ } assertNull(tenantRepository.getTenant(shouldBeDeleted)); assertNotNull(tenantRepository.getTenant(shouldNotBeDeleted)); }
new TenantsMaintainer(applicationRepository, curator, Duration.ofMillis(10));
public void deleteTenantWithNoApplications() { Curator curator = new MockCurator(); GlobalComponentRegistry componentRegistry = new TestComponentRegistry.Builder().curator(curator).build(); TenantRepository tenantRepository = new TenantRepository(componentRegistry, false); ApplicationRepository applicationRepository = new ApplicationRepository(tenantRepository, new SessionHandlerTest.MockProvisioner(), Clock.systemUTC()); TenantName shouldBeDeleted = TenantName.from("to-be-deleted"); TenantName shouldNotBeDeleted = TenantName.from("should-not-be-deleted"); tenantRepository.addTenant(shouldBeDeleted); tenantRepository.addTenant(shouldNotBeDeleted); applicationRepository.deploy(new File("src/test/apps/app"), new PrepareParams.Builder() .applicationId(ApplicationId.from(shouldNotBeDeleted, ApplicationName.from("foo"), InstanceName.defaultName())) .build()); assertNotNull(tenantRepository.getTenant(shouldBeDeleted)); assertNotNull(tenantRepository.getTenant(shouldNotBeDeleted)); new TenantsMaintainer(applicationRepository, curator, Duration.ofDays(1)).run(); assertNull(tenantRepository.getTenant(shouldBeDeleted)); assertNotNull(tenantRepository.getTenant(shouldNotBeDeleted)); }
class TenantsMaintainerTest { @Test }
class TenantsMaintainerTest { @Test }
This check will be removed. Unless it's of no value, please consider using a [junit Rule](https://stackoverflow.com/a/6527990) to verify the exception message.
public void builder_cannot_be_reused() throws Exception { ApplicationBuilder builder = new ApplicationBuilder(); builder.servicesXml("<jdisc version=\"1.0\" />"); try (Application application = builder.build()) { } builder.servicesXml(""); }
builder.servicesXml("");
public void builder_cannot_be_reused() throws Exception { expectedException.expect(RuntimeException.class); expectedException.expectMessage(containsString("build method")); ApplicationBuilder builder = new ApplicationBuilder(); builder.servicesXml("<jdisc version=\"1.0\" />"); try (Application application = builder.build()) { } builder.servicesXml(""); }
class ApplicationBuilderTest { @Test public void query_profile_types_can_be_added() throws Exception { withApplicationBuilder(builder -> { builder.queryProfileType("MyProfileType", "<query-profile-type id=\"MyProfileType\">" + "<field name=\"age\" type=\"integer\" />" + "<field name=\"profession\" type=\"string\" />" + "<field name=\"user\" type=\"query-profile:MyUserProfile\" />" + "</query-profile-type>"); assertTrue(Files.exists(builder.getPath().resolve("search/query-profiles/types/MyProfileType.xml"))); }); } @Test public void query_profile_can_be_added() throws Exception { withApplicationBuilder(builder -> { builder.queryProfile("MyProfile", "<query-profile id=\"MyProfile\">" + "<field name=\"message\">Hello world!</field>" + "</query-profile>"); assertTrue(Files.exists(builder.getPath().resolve("search/query-profiles/MyProfile.xml"))); }); } @Test public void rank_expression_can_be_added() throws Exception { withApplicationBuilder(builder -> { builder.rankExpression("myExpression", "content"); assertTrue(Files.exists(builder.getPath().resolve("searchdefinitions/myExpression.expression"))); }); } @Test(expected = RuntimeException.class) @SuppressWarnings("try") private interface TestCase { public void accept(ApplicationBuilder ab) throws Exception; } public void withApplicationBuilder(TestCase f) throws Exception { ApplicationBuilder builder = new ApplicationBuilder(); try { f.accept(builder); } finally { IOUtils.recursiveDeleteDir(builder.getPath().toFile()); } } }
class ApplicationBuilderTest { @Test public void query_profile_types_can_be_added() throws Exception { withApplicationBuilder(builder -> { builder.queryProfileType("MyProfileType", "<query-profile-type id=\"MyProfileType\">" + "<field name=\"age\" type=\"integer\" />" + "<field name=\"profession\" type=\"string\" />" + "<field name=\"user\" type=\"query-profile:MyUserProfile\" />" + "</query-profile-type>"); assertTrue(Files.exists(builder.getPath().resolve("search/query-profiles/types/MyProfileType.xml"))); }); } @Test public void query_profile_can_be_added() throws Exception { withApplicationBuilder(builder -> { builder.queryProfile("MyProfile", "<query-profile id=\"MyProfile\">" + "<field name=\"message\">Hello world!</field>" + "</query-profile>"); assertTrue(Files.exists(builder.getPath().resolve("search/query-profiles/MyProfile.xml"))); }); } @Test public void rank_expression_can_be_added() throws Exception { withApplicationBuilder(builder -> { builder.rankExpression("myExpression", "content"); assertTrue(Files.exists(builder.getPath().resolve("searchdefinitions/myExpression.expression"))); }); } @Rule public ExpectedException expectedException = ExpectedException.none(); @Test @SuppressWarnings("try") private interface TestCase { void accept(ApplicationBuilder ab) throws Exception; } private static void withApplicationBuilder(TestCase f) throws Exception { ApplicationBuilder builder = new ApplicationBuilder(); try { f.accept(builder); } finally { IOUtils.recursiveDeleteDir(builder.getPath().toFile()); } } }
This was already checked in `queueFirstServerIfDown` ;0
public ProxyResponse handle(ProxyRequest proxyRequest) throws ProxyException { if (proxyRequest.isDiscoveryRequest()) { return createDiscoveryResponse(proxyRequest); } ZoneId zoneId = ZoneId.from(proxyRequest.getEnvironment(), proxyRequest.getRegion()); List<URI> allServers = zoneRegistry.getConfigServerVipUri(zoneId) .filter(zone -> zoneId.region().value().startsWith("aws-") || zoneId.region().value().startsWith("cd-aws-")) .map(Collections::singletonList) .orElseGet(() -> new ArrayList<>(zoneRegistry.getConfigServerUris(zoneId))); StringBuilder errorBuilder = new StringBuilder(); if (allServers.size() > 1 && queueFirstServerIfDown(allServers, proxyRequest)) { errorBuilder.append("Change ordering due to failed ping."); } for (URI uri : allServers) { Optional<ProxyResponse> proxyResponse = proxyCall(uri, proxyRequest, errorBuilder); if (proxyResponse.isPresent()) { return proxyResponse.get(); } } throw new ProxyException(ErrorResponse.internalServerError("Failed talking to config servers: " + errorBuilder.toString())); }
if (allServers.size() > 1 && queueFirstServerIfDown(allServers, proxyRequest)) {
public ProxyResponse handle(ProxyRequest proxyRequest) throws ProxyException { if (proxyRequest.isDiscoveryRequest()) { return createDiscoveryResponse(proxyRequest); } ZoneId zoneId = ZoneId.from(proxyRequest.getEnvironment(), proxyRequest.getRegion()); List<URI> allServers = zoneRegistry.getConfigServerVipUri(zoneId) .filter(zone -> zoneId.region().value().startsWith("aws-") || zoneId.region().value().startsWith("cd-aws-")) .map(Collections::singletonList) .orElseGet(() -> new ArrayList<>(zoneRegistry.getConfigServerUris(zoneId))); StringBuilder errorBuilder = new StringBuilder(); if (queueFirstServerIfDown(allServers, proxyRequest)) { errorBuilder.append("Change ordering due to failed ping."); } for (URI uri : allServers) { Optional<ProxyResponse> proxyResponse = proxyCall(uri, proxyRequest, errorBuilder); if (proxyResponse.isPresent()) { return proxyResponse.get(); } } throw new ProxyException(ErrorResponse.internalServerError("Failed talking to config servers: " + errorBuilder.toString())); }
class ConfigServerRestExecutorImpl implements ConfigServerRestExecutor { private static final Logger log = Logger.getLogger(ConfigServerRestExecutorImpl.class.getName()); private static final Duration PROXY_REQUEST_TIMEOUT = Duration.ofSeconds(10); private static final Set<String> HEADERS_TO_COPY = new HashSet<>(Arrays.asList("X-HTTP-Method-Override", "Content-Type")); private final ZoneRegistry zoneRegistry; private final ServiceIdentityProvider sslContextProvider; @Inject public ConfigServerRestExecutorImpl(ZoneRegistry zoneRegistry, ServiceIdentityProvider sslContextProvider) { this.zoneRegistry = zoneRegistry; this.sslContextProvider = sslContextProvider; } @Override private static class DiscoveryResponseStructure { public List<String> uris = new ArrayList<>(); } private ProxyResponse createDiscoveryResponse(ProxyRequest proxyRequest) { ObjectMapper mapper = new ObjectMapper(); DiscoveryResponseStructure responseStructure = new DiscoveryResponseStructure(); String environmentName = proxyRequest.getEnvironment(); ZoneList zones = zoneRegistry.zones().all(); if ( ! 
environmentName.isEmpty()) zones = zones.in(Environment.from(environmentName)); for (ZoneId zoneId : zones.ids()) { responseStructure.uris.add(proxyRequest.getScheme() + ": zoneId.environment().name() + "/" + zoneId.region().value()); } JsonNode node = mapper.valueToTree(responseStructure); return new ProxyResponse(proxyRequest, node.toString(), 200, Optional.empty(), "application/json"); } private static String removeFirstSlashIfAny(String url) { if (url.startsWith("/")) { return url.substring(1); } return url; } private Optional<ProxyResponse> proxyCall(URI uri, ProxyRequest proxyRequest, StringBuilder errorBuilder) throws ProxyException { String fullUri = uri.toString() + removeFirstSlashIfAny(proxyRequest.getConfigServerRequest()); final HttpRequestBase requestBase = createHttpBaseRequest( proxyRequest.getMethod(), fullUri, proxyRequest.getData()); copyHeaders(proxyRequest.getHeaders(), requestBase); RequestConfig config = RequestConfig.custom() .setConnectTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis()) .setConnectionRequestTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis()) .setSocketTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis()).build(); try ( CloseableHttpClient client = createHttpClient(config, sslContextProvider, zoneRegistry, proxyRequest); CloseableHttpResponse response = client.execute(requestBase); ) { String content = getContent(response); int status = response.getStatusLine().getStatusCode(); if (status / 100 == 5) { errorBuilder.append("Talking to server ").append(uri.getHost()); errorBuilder.append(", got ").append(status).append(" ") .append(content).append("\n"); log.log(LogLevel.DEBUG, () -> String.format("Got response from %s with status code %d and content:\n %s", uri.getHost(), status, content)); return Optional.empty(); } final Header contentHeader = response.getLastHeader("Content-Type"); final String contentType; if (contentHeader != null && contentHeader.getValue() != null && ! 
contentHeader.getValue().isEmpty()) { contentType = contentHeader.getValue().replace("; charset=UTF-8",""); } else { contentType = "application/json"; } return Optional.of(new ProxyResponse(proxyRequest, content, status, Optional.of(uri), contentType)); } catch (Exception e) { errorBuilder.append("Talking to server ").append(uri.getHost()); errorBuilder.append(" got exception ").append(e.getMessage()); log.log(LogLevel.DEBUG, e, () -> "Got exception while sending request to " + uri.getHost()); return Optional.empty(); } } private static String getContent(CloseableHttpResponse response) { return Optional.ofNullable(response.getEntity()) .map(entity -> { try { return EntityUtils.toString(entity); } catch (IOException e) { throw new UncheckedIOException(e); } } ).orElse(""); } private static HttpRequestBase createHttpBaseRequest(String method, String uri, InputStream data) throws ProxyException { Method enumMethod = Method.valueOf(method); switch (enumMethod) { case GET: return new HttpGet(uri); case POST: HttpPost post = new HttpPost(uri); if (data != null) { post.setEntity(new InputStreamEntity(data)); } return post; case PUT: HttpPut put = new HttpPut(uri); if (data != null) { put.setEntity(new InputStreamEntity(data)); } return put; case DELETE: return new HttpDelete(uri); case PATCH: HttpPatch patch = new HttpPatch(uri); if (data != null) { patch.setEntity(new InputStreamEntity(data)); } return patch; default: throw new ProxyException(ErrorResponse.methodNotAllowed("Will not proxy such calls.")); } } private static void copyHeaders(Map<String, List<String>> headers, HttpRequestBase toRequest) { for (Map.Entry<String, List<String>> headerEntry : headers.entrySet()) { if (HEADERS_TO_COPY.contains(headerEntry.getKey())) { for (String value : headerEntry.getValue()) { toRequest.addHeader(headerEntry.getKey(), value); } } } } /** * During upgrade, one server can be down, this is normal. 
Therefor we do a quick ping on the first server, * if it is not responding, we try the other servers first. False positive/negatives are not critical, * but will increase latency to some extent. */ private boolean queueFirstServerIfDown(List<URI> allServers, ProxyRequest proxyRequest) { if (allServers.size() < 2) { return false; } URI uri = allServers.get(0); HttpGet httpget = new HttpGet(uri); int timeout = 500; RequestConfig config = RequestConfig.custom() .setConnectTimeout(timeout) .setConnectionRequestTimeout(timeout) .setSocketTimeout(timeout).build(); try ( CloseableHttpClient client = createHttpClient(config, sslContextProvider, zoneRegistry, proxyRequest); CloseableHttpResponse response = client.execute(httpget); ) { if (response.getStatusLine().getStatusCode() == 200) { return false; } } catch (IOException e) { } allServers.remove(0); allServers.add(uri); return true; } private static CloseableHttpClient createHttpClient(RequestConfig config, ServiceIdentityProvider sslContextProvider, ZoneRegistry zoneRegistry, ProxyRequest proxyRequest) { AthenzIdentityVerifier hostnameVerifier = new AthenzIdentityVerifier( singleton( zoneRegistry.getConfigServerAthenzService( ZoneId.from(proxyRequest.getEnvironment(), proxyRequest.getRegion())))); return HttpClientBuilder.create() .setUserAgent("config-server-proxy-client") .setSslcontext(sslContextProvider.getIdentitySslContext()) .setHostnameVerifier(new AthenzIdentityVerifierAdapter(hostnameVerifier)) .setDefaultRequestConfig(config) .build(); } private static class AthenzIdentityVerifierAdapter implements X509HostnameVerifier { private final AthenzIdentityVerifier verifier; AthenzIdentityVerifierAdapter(AthenzIdentityVerifier verifier) { this.verifier = verifier; } @Override public boolean verify(String hostname, SSLSession sslSession) { return verifier.verify(hostname, sslSession); } @Override public void verify(String host, SSLSocket ssl) { /* All sockets accepted */} @Override public void verify(String hostname, 
X509Certificate certificate) throws SSLException { AthenzIdentity identity = AthenzIdentities.from(certificate); if (!verifier.isTrusted(identity)) { throw new SSLException("Athenz identity is not trusted: " + identity.getFullName()); } } @Override public void verify(String hostname, String[] cns, String[] subjectAlts) throws SSLException { AthenzIdentity identity = AthenzIdentities.from(cns[0]); if (!verifier.isTrusted(identity)) { throw new SSLException("Athenz identity is not trusted: " + identity.getFullName()); } } } }
class ConfigServerRestExecutorImpl implements ConfigServerRestExecutor { private static final Logger log = Logger.getLogger(ConfigServerRestExecutorImpl.class.getName()); private static final Duration PROXY_REQUEST_TIMEOUT = Duration.ofSeconds(10); private static final Set<String> HEADERS_TO_COPY = new HashSet<>(Arrays.asList("X-HTTP-Method-Override", "Content-Type")); private final ZoneRegistry zoneRegistry; private final ServiceIdentityProvider sslContextProvider; @Inject public ConfigServerRestExecutorImpl(ZoneRegistry zoneRegistry, ServiceIdentityProvider sslContextProvider) { this.zoneRegistry = zoneRegistry; this.sslContextProvider = sslContextProvider; } @Override private static class DiscoveryResponseStructure { public List<String> uris = new ArrayList<>(); } private ProxyResponse createDiscoveryResponse(ProxyRequest proxyRequest) { ObjectMapper mapper = new ObjectMapper(); DiscoveryResponseStructure responseStructure = new DiscoveryResponseStructure(); String environmentName = proxyRequest.getEnvironment(); ZoneList zones = zoneRegistry.zones().all(); if ( ! 
environmentName.isEmpty()) zones = zones.in(Environment.from(environmentName)); for (ZoneId zoneId : zones.ids()) { responseStructure.uris.add(proxyRequest.getScheme() + ": zoneId.environment().name() + "/" + zoneId.region().value()); } JsonNode node = mapper.valueToTree(responseStructure); return new ProxyResponse(proxyRequest, node.toString(), 200, Optional.empty(), "application/json"); } private static String removeFirstSlashIfAny(String url) { if (url.startsWith("/")) { return url.substring(1); } return url; } private Optional<ProxyResponse> proxyCall(URI uri, ProxyRequest proxyRequest, StringBuilder errorBuilder) throws ProxyException { String fullUri = uri.toString() + removeFirstSlashIfAny(proxyRequest.getConfigServerRequest()); final HttpRequestBase requestBase = createHttpBaseRequest( proxyRequest.getMethod(), fullUri, proxyRequest.getData()); copyHeaders(proxyRequest.getHeaders(), requestBase); RequestConfig config = RequestConfig.custom() .setConnectTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis()) .setConnectionRequestTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis()) .setSocketTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis()).build(); try ( CloseableHttpClient client = createHttpClient(config, sslContextProvider, zoneRegistry, proxyRequest); CloseableHttpResponse response = client.execute(requestBase); ) { String content = getContent(response); int status = response.getStatusLine().getStatusCode(); if (status / 100 == 5) { errorBuilder.append("Talking to server ").append(uri.getHost()); errorBuilder.append(", got ").append(status).append(" ") .append(content).append("\n"); log.log(LogLevel.DEBUG, () -> String.format("Got response from %s with status code %d and content:\n %s", uri.getHost(), status, content)); return Optional.empty(); } final Header contentHeader = response.getLastHeader("Content-Type"); final String contentType; if (contentHeader != null && contentHeader.getValue() != null && ! 
contentHeader.getValue().isEmpty()) { contentType = contentHeader.getValue().replace("; charset=UTF-8",""); } else { contentType = "application/json"; } return Optional.of(new ProxyResponse(proxyRequest, content, status, Optional.of(uri), contentType)); } catch (Exception e) { errorBuilder.append("Talking to server ").append(uri.getHost()); errorBuilder.append(" got exception ").append(e.getMessage()); log.log(LogLevel.DEBUG, e, () -> "Got exception while sending request to " + uri.getHost()); return Optional.empty(); } } private static String getContent(CloseableHttpResponse response) { return Optional.ofNullable(response.getEntity()) .map(entity -> { try { return EntityUtils.toString(entity); } catch (IOException e) { throw new UncheckedIOException(e); } } ).orElse(""); } private static HttpRequestBase createHttpBaseRequest(String method, String uri, InputStream data) throws ProxyException { Method enumMethod = Method.valueOf(method); switch (enumMethod) { case GET: return new HttpGet(uri); case POST: HttpPost post = new HttpPost(uri); if (data != null) { post.setEntity(new InputStreamEntity(data)); } return post; case PUT: HttpPut put = new HttpPut(uri); if (data != null) { put.setEntity(new InputStreamEntity(data)); } return put; case DELETE: return new HttpDelete(uri); case PATCH: HttpPatch patch = new HttpPatch(uri); if (data != null) { patch.setEntity(new InputStreamEntity(data)); } return patch; default: throw new ProxyException(ErrorResponse.methodNotAllowed("Will not proxy such calls.")); } } private static void copyHeaders(Map<String, List<String>> headers, HttpRequestBase toRequest) { for (Map.Entry<String, List<String>> headerEntry : headers.entrySet()) { if (HEADERS_TO_COPY.contains(headerEntry.getKey())) { for (String value : headerEntry.getValue()) { toRequest.addHeader(headerEntry.getKey(), value); } } } } /** * During upgrade, one server can be down, this is normal. 
Therefor we do a quick ping on the first server, * if it is not responding, we try the other servers first. False positive/negatives are not critical, * but will increase latency to some extent. */ private boolean queueFirstServerIfDown(List<URI> allServers, ProxyRequest proxyRequest) { if (allServers.size() < 2) { return false; } URI uri = allServers.get(0); HttpGet httpget = new HttpGet(uri); int timeout = 500; RequestConfig config = RequestConfig.custom() .setConnectTimeout(timeout) .setConnectionRequestTimeout(timeout) .setSocketTimeout(timeout).build(); try ( CloseableHttpClient client = createHttpClient(config, sslContextProvider, zoneRegistry, proxyRequest); CloseableHttpResponse response = client.execute(httpget); ) { if (response.getStatusLine().getStatusCode() == 200) { return false; } } catch (IOException e) { } allServers.remove(0); allServers.add(uri); return true; } private static CloseableHttpClient createHttpClient(RequestConfig config, ServiceIdentityProvider sslContextProvider, ZoneRegistry zoneRegistry, ProxyRequest proxyRequest) { AthenzIdentityVerifier hostnameVerifier = new AthenzIdentityVerifier( singleton( zoneRegistry.getConfigServerAthenzService( ZoneId.from(proxyRequest.getEnvironment(), proxyRequest.getRegion())))); return HttpClientBuilder.create() .setUserAgent("config-server-proxy-client") .setSslcontext(sslContextProvider.getIdentitySslContext()) .setHostnameVerifier(new AthenzIdentityVerifierAdapter(hostnameVerifier)) .setDefaultRequestConfig(config) .build(); } private static class AthenzIdentityVerifierAdapter implements X509HostnameVerifier { private final AthenzIdentityVerifier verifier; AthenzIdentityVerifierAdapter(AthenzIdentityVerifier verifier) { this.verifier = verifier; } @Override public boolean verify(String hostname, SSLSession sslSession) { return verifier.verify(hostname, sslSession); } @Override public void verify(String host, SSLSocket ssl) { /* All sockets accepted */} @Override public void verify(String hostname, 
X509Certificate certificate) throws SSLException { AthenzIdentity identity = AthenzIdentities.from(certificate); if (!verifier.isTrusted(identity)) { throw new SSLException("Athenz identity is not trusted: " + identity.getFullName()); } } @Override public void verify(String hostname, String[] cns, String[] subjectAlts) throws SSLException { AthenzIdentity identity = AthenzIdentities.from(cns[0]); if (!verifier.isTrusted(identity)) { throw new SSLException("Athenz identity is not trusted: " + identity.getFullName()); } } } }
Nice, removed.
public ProxyResponse handle(ProxyRequest proxyRequest) throws ProxyException { if (proxyRequest.isDiscoveryRequest()) { return createDiscoveryResponse(proxyRequest); } ZoneId zoneId = ZoneId.from(proxyRequest.getEnvironment(), proxyRequest.getRegion()); List<URI> allServers = zoneRegistry.getConfigServerVipUri(zoneId) .filter(zone -> zoneId.region().value().startsWith("aws-") || zoneId.region().value().startsWith("cd-aws-")) .map(Collections::singletonList) .orElseGet(() -> new ArrayList<>(zoneRegistry.getConfigServerUris(zoneId))); StringBuilder errorBuilder = new StringBuilder(); if (allServers.size() > 1 && queueFirstServerIfDown(allServers, proxyRequest)) { errorBuilder.append("Change ordering due to failed ping."); } for (URI uri : allServers) { Optional<ProxyResponse> proxyResponse = proxyCall(uri, proxyRequest, errorBuilder); if (proxyResponse.isPresent()) { return proxyResponse.get(); } } throw new ProxyException(ErrorResponse.internalServerError("Failed talking to config servers: " + errorBuilder.toString())); }
if (allServers.size() > 1 && queueFirstServerIfDown(allServers, proxyRequest)) {
public ProxyResponse handle(ProxyRequest proxyRequest) throws ProxyException { if (proxyRequest.isDiscoveryRequest()) { return createDiscoveryResponse(proxyRequest); } ZoneId zoneId = ZoneId.from(proxyRequest.getEnvironment(), proxyRequest.getRegion()); List<URI> allServers = zoneRegistry.getConfigServerVipUri(zoneId) .filter(zone -> zoneId.region().value().startsWith("aws-") || zoneId.region().value().startsWith("cd-aws-")) .map(Collections::singletonList) .orElseGet(() -> new ArrayList<>(zoneRegistry.getConfigServerUris(zoneId))); StringBuilder errorBuilder = new StringBuilder(); if (queueFirstServerIfDown(allServers, proxyRequest)) { errorBuilder.append("Change ordering due to failed ping."); } for (URI uri : allServers) { Optional<ProxyResponse> proxyResponse = proxyCall(uri, proxyRequest, errorBuilder); if (proxyResponse.isPresent()) { return proxyResponse.get(); } } throw new ProxyException(ErrorResponse.internalServerError("Failed talking to config servers: " + errorBuilder.toString())); }
class ConfigServerRestExecutorImpl implements ConfigServerRestExecutor { private static final Logger log = Logger.getLogger(ConfigServerRestExecutorImpl.class.getName()); private static final Duration PROXY_REQUEST_TIMEOUT = Duration.ofSeconds(10); private static final Set<String> HEADERS_TO_COPY = new HashSet<>(Arrays.asList("X-HTTP-Method-Override", "Content-Type")); private final ZoneRegistry zoneRegistry; private final ServiceIdentityProvider sslContextProvider; @Inject public ConfigServerRestExecutorImpl(ZoneRegistry zoneRegistry, ServiceIdentityProvider sslContextProvider) { this.zoneRegistry = zoneRegistry; this.sslContextProvider = sslContextProvider; } @Override private static class DiscoveryResponseStructure { public List<String> uris = new ArrayList<>(); } private ProxyResponse createDiscoveryResponse(ProxyRequest proxyRequest) { ObjectMapper mapper = new ObjectMapper(); DiscoveryResponseStructure responseStructure = new DiscoveryResponseStructure(); String environmentName = proxyRequest.getEnvironment(); ZoneList zones = zoneRegistry.zones().all(); if ( ! 
environmentName.isEmpty()) zones = zones.in(Environment.from(environmentName)); for (ZoneId zoneId : zones.ids()) { responseStructure.uris.add(proxyRequest.getScheme() + ": zoneId.environment().name() + "/" + zoneId.region().value()); } JsonNode node = mapper.valueToTree(responseStructure); return new ProxyResponse(proxyRequest, node.toString(), 200, Optional.empty(), "application/json"); } private static String removeFirstSlashIfAny(String url) { if (url.startsWith("/")) { return url.substring(1); } return url; } private Optional<ProxyResponse> proxyCall(URI uri, ProxyRequest proxyRequest, StringBuilder errorBuilder) throws ProxyException { String fullUri = uri.toString() + removeFirstSlashIfAny(proxyRequest.getConfigServerRequest()); final HttpRequestBase requestBase = createHttpBaseRequest( proxyRequest.getMethod(), fullUri, proxyRequest.getData()); copyHeaders(proxyRequest.getHeaders(), requestBase); RequestConfig config = RequestConfig.custom() .setConnectTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis()) .setConnectionRequestTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis()) .setSocketTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis()).build(); try ( CloseableHttpClient client = createHttpClient(config, sslContextProvider, zoneRegistry, proxyRequest); CloseableHttpResponse response = client.execute(requestBase); ) { String content = getContent(response); int status = response.getStatusLine().getStatusCode(); if (status / 100 == 5) { errorBuilder.append("Talking to server ").append(uri.getHost()); errorBuilder.append(", got ").append(status).append(" ") .append(content).append("\n"); log.log(LogLevel.DEBUG, () -> String.format("Got response from %s with status code %d and content:\n %s", uri.getHost(), status, content)); return Optional.empty(); } final Header contentHeader = response.getLastHeader("Content-Type"); final String contentType; if (contentHeader != null && contentHeader.getValue() != null && ! 
contentHeader.getValue().isEmpty()) { contentType = contentHeader.getValue().replace("; charset=UTF-8",""); } else { contentType = "application/json"; } return Optional.of(new ProxyResponse(proxyRequest, content, status, Optional.of(uri), contentType)); } catch (Exception e) { errorBuilder.append("Talking to server ").append(uri.getHost()); errorBuilder.append(" got exception ").append(e.getMessage()); log.log(LogLevel.DEBUG, e, () -> "Got exception while sending request to " + uri.getHost()); return Optional.empty(); } } private static String getContent(CloseableHttpResponse response) { return Optional.ofNullable(response.getEntity()) .map(entity -> { try { return EntityUtils.toString(entity); } catch (IOException e) { throw new UncheckedIOException(e); } } ).orElse(""); } private static HttpRequestBase createHttpBaseRequest(String method, String uri, InputStream data) throws ProxyException { Method enumMethod = Method.valueOf(method); switch (enumMethod) { case GET: return new HttpGet(uri); case POST: HttpPost post = new HttpPost(uri); if (data != null) { post.setEntity(new InputStreamEntity(data)); } return post; case PUT: HttpPut put = new HttpPut(uri); if (data != null) { put.setEntity(new InputStreamEntity(data)); } return put; case DELETE: return new HttpDelete(uri); case PATCH: HttpPatch patch = new HttpPatch(uri); if (data != null) { patch.setEntity(new InputStreamEntity(data)); } return patch; default: throw new ProxyException(ErrorResponse.methodNotAllowed("Will not proxy such calls.")); } } private static void copyHeaders(Map<String, List<String>> headers, HttpRequestBase toRequest) { for (Map.Entry<String, List<String>> headerEntry : headers.entrySet()) { if (HEADERS_TO_COPY.contains(headerEntry.getKey())) { for (String value : headerEntry.getValue()) { toRequest.addHeader(headerEntry.getKey(), value); } } } } /** * During upgrade, one server can be down, this is normal. 
Therefor we do a quick ping on the first server, * if it is not responding, we try the other servers first. False positive/negatives are not critical, * but will increase latency to some extent. */ private boolean queueFirstServerIfDown(List<URI> allServers, ProxyRequest proxyRequest) { if (allServers.size() < 2) { return false; } URI uri = allServers.get(0); HttpGet httpget = new HttpGet(uri); int timeout = 500; RequestConfig config = RequestConfig.custom() .setConnectTimeout(timeout) .setConnectionRequestTimeout(timeout) .setSocketTimeout(timeout).build(); try ( CloseableHttpClient client = createHttpClient(config, sslContextProvider, zoneRegistry, proxyRequest); CloseableHttpResponse response = client.execute(httpget); ) { if (response.getStatusLine().getStatusCode() == 200) { return false; } } catch (IOException e) { } allServers.remove(0); allServers.add(uri); return true; } private static CloseableHttpClient createHttpClient(RequestConfig config, ServiceIdentityProvider sslContextProvider, ZoneRegistry zoneRegistry, ProxyRequest proxyRequest) { AthenzIdentityVerifier hostnameVerifier = new AthenzIdentityVerifier( singleton( zoneRegistry.getConfigServerAthenzService( ZoneId.from(proxyRequest.getEnvironment(), proxyRequest.getRegion())))); return HttpClientBuilder.create() .setUserAgent("config-server-proxy-client") .setSslcontext(sslContextProvider.getIdentitySslContext()) .setHostnameVerifier(new AthenzIdentityVerifierAdapter(hostnameVerifier)) .setDefaultRequestConfig(config) .build(); } private static class AthenzIdentityVerifierAdapter implements X509HostnameVerifier { private final AthenzIdentityVerifier verifier; AthenzIdentityVerifierAdapter(AthenzIdentityVerifier verifier) { this.verifier = verifier; } @Override public boolean verify(String hostname, SSLSession sslSession) { return verifier.verify(hostname, sslSession); } @Override public void verify(String host, SSLSocket ssl) { /* All sockets accepted */} @Override public void verify(String hostname, 
X509Certificate certificate) throws SSLException { AthenzIdentity identity = AthenzIdentities.from(certificate); if (!verifier.isTrusted(identity)) { throw new SSLException("Athenz identity is not trusted: " + identity.getFullName()); } } @Override public void verify(String hostname, String[] cns, String[] subjectAlts) throws SSLException { AthenzIdentity identity = AthenzIdentities.from(cns[0]); if (!verifier.isTrusted(identity)) { throw new SSLException("Athenz identity is not trusted: " + identity.getFullName()); } } } }
class ConfigServerRestExecutorImpl implements ConfigServerRestExecutor { private static final Logger log = Logger.getLogger(ConfigServerRestExecutorImpl.class.getName()); private static final Duration PROXY_REQUEST_TIMEOUT = Duration.ofSeconds(10); private static final Set<String> HEADERS_TO_COPY = new HashSet<>(Arrays.asList("X-HTTP-Method-Override", "Content-Type")); private final ZoneRegistry zoneRegistry; private final ServiceIdentityProvider sslContextProvider; @Inject public ConfigServerRestExecutorImpl(ZoneRegistry zoneRegistry, ServiceIdentityProvider sslContextProvider) { this.zoneRegistry = zoneRegistry; this.sslContextProvider = sslContextProvider; } @Override private static class DiscoveryResponseStructure { public List<String> uris = new ArrayList<>(); } private ProxyResponse createDiscoveryResponse(ProxyRequest proxyRequest) { ObjectMapper mapper = new ObjectMapper(); DiscoveryResponseStructure responseStructure = new DiscoveryResponseStructure(); String environmentName = proxyRequest.getEnvironment(); ZoneList zones = zoneRegistry.zones().all(); if ( ! 
environmentName.isEmpty()) zones = zones.in(Environment.from(environmentName)); for (ZoneId zoneId : zones.ids()) { responseStructure.uris.add(proxyRequest.getScheme() + ": zoneId.environment().name() + "/" + zoneId.region().value()); } JsonNode node = mapper.valueToTree(responseStructure); return new ProxyResponse(proxyRequest, node.toString(), 200, Optional.empty(), "application/json"); } private static String removeFirstSlashIfAny(String url) { if (url.startsWith("/")) { return url.substring(1); } return url; } private Optional<ProxyResponse> proxyCall(URI uri, ProxyRequest proxyRequest, StringBuilder errorBuilder) throws ProxyException { String fullUri = uri.toString() + removeFirstSlashIfAny(proxyRequest.getConfigServerRequest()); final HttpRequestBase requestBase = createHttpBaseRequest( proxyRequest.getMethod(), fullUri, proxyRequest.getData()); copyHeaders(proxyRequest.getHeaders(), requestBase); RequestConfig config = RequestConfig.custom() .setConnectTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis()) .setConnectionRequestTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis()) .setSocketTimeout((int) PROXY_REQUEST_TIMEOUT.toMillis()).build(); try ( CloseableHttpClient client = createHttpClient(config, sslContextProvider, zoneRegistry, proxyRequest); CloseableHttpResponse response = client.execute(requestBase); ) { String content = getContent(response); int status = response.getStatusLine().getStatusCode(); if (status / 100 == 5) { errorBuilder.append("Talking to server ").append(uri.getHost()); errorBuilder.append(", got ").append(status).append(" ") .append(content).append("\n"); log.log(LogLevel.DEBUG, () -> String.format("Got response from %s with status code %d and content:\n %s", uri.getHost(), status, content)); return Optional.empty(); } final Header contentHeader = response.getLastHeader("Content-Type"); final String contentType; if (contentHeader != null && contentHeader.getValue() != null && ! 
contentHeader.getValue().isEmpty()) { contentType = contentHeader.getValue().replace("; charset=UTF-8",""); } else { contentType = "application/json"; } return Optional.of(new ProxyResponse(proxyRequest, content, status, Optional.of(uri), contentType)); } catch (Exception e) { errorBuilder.append("Talking to server ").append(uri.getHost()); errorBuilder.append(" got exception ").append(e.getMessage()); log.log(LogLevel.DEBUG, e, () -> "Got exception while sending request to " + uri.getHost()); return Optional.empty(); } } private static String getContent(CloseableHttpResponse response) { return Optional.ofNullable(response.getEntity()) .map(entity -> { try { return EntityUtils.toString(entity); } catch (IOException e) { throw new UncheckedIOException(e); } } ).orElse(""); } private static HttpRequestBase createHttpBaseRequest(String method, String uri, InputStream data) throws ProxyException { Method enumMethod = Method.valueOf(method); switch (enumMethod) { case GET: return new HttpGet(uri); case POST: HttpPost post = new HttpPost(uri); if (data != null) { post.setEntity(new InputStreamEntity(data)); } return post; case PUT: HttpPut put = new HttpPut(uri); if (data != null) { put.setEntity(new InputStreamEntity(data)); } return put; case DELETE: return new HttpDelete(uri); case PATCH: HttpPatch patch = new HttpPatch(uri); if (data != null) { patch.setEntity(new InputStreamEntity(data)); } return patch; default: throw new ProxyException(ErrorResponse.methodNotAllowed("Will not proxy such calls.")); } } private static void copyHeaders(Map<String, List<String>> headers, HttpRequestBase toRequest) { for (Map.Entry<String, List<String>> headerEntry : headers.entrySet()) { if (HEADERS_TO_COPY.contains(headerEntry.getKey())) { for (String value : headerEntry.getValue()) { toRequest.addHeader(headerEntry.getKey(), value); } } } } /** * During upgrade, one server can be down, this is normal. 
Therefor we do a quick ping on the first server, * if it is not responding, we try the other servers first. False positive/negatives are not critical, * but will increase latency to some extent. */ private boolean queueFirstServerIfDown(List<URI> allServers, ProxyRequest proxyRequest) { if (allServers.size() < 2) { return false; } URI uri = allServers.get(0); HttpGet httpget = new HttpGet(uri); int timeout = 500; RequestConfig config = RequestConfig.custom() .setConnectTimeout(timeout) .setConnectionRequestTimeout(timeout) .setSocketTimeout(timeout).build(); try ( CloseableHttpClient client = createHttpClient(config, sslContextProvider, zoneRegistry, proxyRequest); CloseableHttpResponse response = client.execute(httpget); ) { if (response.getStatusLine().getStatusCode() == 200) { return false; } } catch (IOException e) { } allServers.remove(0); allServers.add(uri); return true; } private static CloseableHttpClient createHttpClient(RequestConfig config, ServiceIdentityProvider sslContextProvider, ZoneRegistry zoneRegistry, ProxyRequest proxyRequest) { AthenzIdentityVerifier hostnameVerifier = new AthenzIdentityVerifier( singleton( zoneRegistry.getConfigServerAthenzService( ZoneId.from(proxyRequest.getEnvironment(), proxyRequest.getRegion())))); return HttpClientBuilder.create() .setUserAgent("config-server-proxy-client") .setSslcontext(sslContextProvider.getIdentitySslContext()) .setHostnameVerifier(new AthenzIdentityVerifierAdapter(hostnameVerifier)) .setDefaultRequestConfig(config) .build(); } private static class AthenzIdentityVerifierAdapter implements X509HostnameVerifier { private final AthenzIdentityVerifier verifier; AthenzIdentityVerifierAdapter(AthenzIdentityVerifier verifier) { this.verifier = verifier; } @Override public boolean verify(String hostname, SSLSession sslSession) { return verifier.verify(hostname, sslSession); } @Override public void verify(String host, SSLSocket ssl) { /* All sockets accepted */} @Override public void verify(String hostname, 
X509Certificate certificate) throws SSLException { AthenzIdentity identity = AthenzIdentities.from(certificate); if (!verifier.isTrusted(identity)) { throw new SSLException("Athenz identity is not trusted: " + identity.getFullName()); } } @Override public void verify(String hostname, String[] cns, String[] subjectAlts) throws SSLException { AthenzIdentity identity = AthenzIdentities.from(cns[0]); if (!verifier.isTrusted(identity)) { throw new SSLException("Athenz identity is not trusted: " + identity.getFullName()); } } } }
This test is ignored, probably needs to be re-written entirely.
public void test() throws InterruptedException { try (DockerTester dockerTester = new DockerTester()) { dockerTester.addNodeRepositoryNode(createNodeRepositoryNode()); while (dockerTester.nodeAdmin.getListOfHosts().size() == 0) { Thread.sleep(10); } dockerTester.callOrderVerifier.assertInOrder( "createContainerCommand with DockerImage { imageId=dockerImage }, HostName: host1.test.yahoo.com, ContainerName { name=host1 }", "updateNodeAttributes with HostName: host1.test.yahoo.com, NodeAttributes{restartGeneration=1, rebootGeneration=null, dockerImage=dockerImage, vespaVersion='null'}"); NodeAdminStateUpdaterImpl updater = dockerTester.nodeAdminStateUpdater; updater.setResumeStateAndCheckIfResumed(NodeAdminStateUpdater.State.SUSPENDED); NodeAdmin nodeAdmin = dockerTester.nodeAdmin; while ( ! nodeAdmin.isFrozen()) { System.out.println("Node admin not frozen yet"); Thread.sleep(10); } assertTrue(nodeAdmin.setFrozen(false)); dockerTester.callOrderVerifier.assertInOrder( "executeInContainer with ContainerName { name=host1 }, args: [" + DockerTester.NODE_PROGRAM + ", stop]"); } }
public void test() throws InterruptedException { try (DockerTester dockerTester = new DockerTester()) { dockerTester.addNodeRepositoryNode(createNodeRepositoryNode()); while (dockerTester.nodeAdmin.getListOfHosts().size() == 0) { Thread.sleep(10); } dockerTester.callOrderVerifier.assertInOrder( "createContainerCommand with DockerImage { imageId=dockerImage }, HostName: host1.test.yahoo.com, ContainerName { name=host1 }", "updateNodeAttributes with HostName: host1.test.yahoo.com, NodeAttributes{restartGeneration=1, rebootGeneration=null, dockerImage=dockerImage, vespaVersion='null'}"); NodeAdminStateUpdaterImpl updater = dockerTester.nodeAdminStateUpdater; updater.setResumeStateAndCheckIfResumed(NodeAdminStateUpdater.State.SUSPENDED); NodeAdmin nodeAdmin = dockerTester.nodeAdmin; while ( ! nodeAdmin.isFrozen()) { System.out.println("Node admin not frozen yet"); Thread.sleep(10); } assertTrue(nodeAdmin.setFrozen(false)); dockerTester.callOrderVerifier.assertInOrder( "executeInContainer with ContainerName { name=host1 }, args: [" + DockerTester.NODE_PROGRAM + ", stop]"); } }
class RebootTest { @Test @Ignore private NodeSpec createNodeRepositoryNode() { return new NodeSpec.Builder() .hostname("host1.test.yahoo.com") .wantedDockerImage(new DockerImage("dockerImage")) .state(Node.State.active) .nodeType(NodeType.tenant) .flavor("docker") .vespaVersion("6.50.0") .wantedRestartGeneration(1L) .currentRestartGeneration(1L) .build(); } }
class RebootTest { @Test @Ignore private NodeSpec createNodeRepositoryNode() { return new NodeSpec.Builder() .hostname("host1.test.yahoo.com") .wantedDockerImage(new DockerImage("dockerImage")) .state(Node.State.active) .nodeType(NodeType.tenant) .flavor("docker") .vespaVersion("6.50.0") .wantedRestartGeneration(1L) .currentRestartGeneration(1L) .build(); } }
What's the deal with the trailing `//`?
public static String getBuilder(InnerCNode node) { return getDeclaration(node) + "\n" + indentCode(INDENTATION, getUninitializedScalars(node) + "\n\n" + stream(node.getChildren()).map(BuilderGenerator::getBuilderFieldDefinition).collect(Collectors.joining("\n")) + "\n\n" + getBuilderConstructors(node, nodeClass(node)) + "\n\n" + getOverrideMethod(node) + "\n\n" + getBuilderSetters(node) + "\n" + getSpecialRootBuilderCode(node)) + "}";
return getDeclaration(node) + "\n" +
public static String getBuilder(InnerCNode node) { return getDeclaration(node) + "\n" + indentCode(INDENTATION, getUninitializedScalars(node) + "\n\n" + stream(node.getChildren()).map(BuilderGenerator::getBuilderFieldDefinition).collect(Collectors.joining("\n")) + "\n\n" + getBuilderConstructors(node, nodeClass(node)) + "\n\n" + getOverrideMethod(node) + "\n\n" + getBuilderSetters(node) + "\n" + getSpecialRootBuilderCode(node)) + "}
class BuilderGenerator { "; }
class BuilderGenerator { "; }
Maybe a bit of Eclipse heritage showing. The default formatter in Eclipse will rejoin lines if they fit, so when you have a long concatenation like a template the // is to maintain the line splits.
public static String getBuilder(InnerCNode node) { return getDeclaration(node) + "\n" + indentCode(INDENTATION, getUninitializedScalars(node) + "\n\n" + stream(node.getChildren()).map(BuilderGenerator::getBuilderFieldDefinition).collect(Collectors.joining("\n")) + "\n\n" + getBuilderConstructors(node, nodeClass(node)) + "\n\n" + getOverrideMethod(node) + "\n\n" + getBuilderSetters(node) + "\n" + getSpecialRootBuilderCode(node)) + "}";
return getDeclaration(node) + "\n" +
public static String getBuilder(InnerCNode node) { return getDeclaration(node) + "\n" + indentCode(INDENTATION, getUninitializedScalars(node) + "\n\n" + stream(node.getChildren()).map(BuilderGenerator::getBuilderFieldDefinition).collect(Collectors.joining("\n")) + "\n\n" + getBuilderConstructors(node, nodeClass(node)) + "\n\n" + getOverrideMethod(node) + "\n\n" + getBuilderSetters(node) + "\n" + getSpecialRootBuilderCode(node)) + "}
class BuilderGenerator { "; }
class BuilderGenerator { "; }
Consider moving guard to the top of the method.
public Set<String> deleteUnusedFiledistributionReferences(File fileReferencesPath, boolean deleteFromDisk) { Set<String> fileReferencesInUse = new HashSet<>(); Set<ApplicationId> applicationIds = listApplications(); applicationIds.forEach(applicationId -> fileReferencesInUse.addAll(getApplication(applicationId).getModel().fileReferences() .stream() .map(FileReference::value) .collect(Collectors.toSet()))); log.log(LogLevel.INFO, "File references in use : " + fileReferencesInUse); if (!fileReferencesPath.isDirectory()) throw new RuntimeException(fileReferencesPath + " is not a directory"); Set<String> fileReferencesOnDisk = new HashSet<>(); File[] filesOnDisk = fileReferencesPath.listFiles(); if (filesOnDisk != null) fileReferencesOnDisk.addAll(Arrays.stream(filesOnDisk).map(File::getName).collect(Collectors.toSet())); log.log(LogLevel.INFO, "File references on disk (in " + fileReferencesPath + "): " + fileReferencesOnDisk); Set<String> fileReferencesToDelete = fileReferencesOnDisk .stream() .filter(fileReference -> ! fileReferencesInUse.contains(fileReference)) .collect(Collectors.toSet()); if (deleteFromDisk) { log.log(LogLevel.INFO, "Will delete file references not in use: " + fileReferencesToDelete); fileReferencesToDelete.forEach(fileReference -> { File file = new File(fileReferencesPath, fileReference); if ( ! IOUtils.recursiveDeleteDir(file)) log.log(LogLevel.WARNING, "Could not delete " + file.getAbsolutePath()); }); } return fileReferencesToDelete; }
if (!fileReferencesPath.isDirectory())
public Set<String> deleteUnusedFiledistributionReferences(File fileReferencesPath, boolean deleteFromDisk) { if (!fileReferencesPath.isDirectory()) throw new RuntimeException(fileReferencesPath + " is not a directory"); Set<String> fileReferencesInUse = new HashSet<>(); Set<ApplicationId> applicationIds = listApplications(); applicationIds.forEach(applicationId -> fileReferencesInUse.addAll(getApplication(applicationId).getModel().fileReferences() .stream() .map(FileReference::value) .collect(Collectors.toSet()))); log.log(LogLevel.INFO, "File references in use : " + fileReferencesInUse); Set<String> fileReferencesOnDisk = new HashSet<>(); File[] filesOnDisk = fileReferencesPath.listFiles(); if (filesOnDisk != null) fileReferencesOnDisk.addAll(Arrays.stream(filesOnDisk).map(File::getName).collect(Collectors.toSet())); log.log(LogLevel.INFO, "File references on disk (in " + fileReferencesPath + "): " + fileReferencesOnDisk); Set<String> fileReferencesToDelete = fileReferencesOnDisk .stream() .filter(fileReference -> ! fileReferencesInUse.contains(fileReference)) .collect(Collectors.toSet()); if (deleteFromDisk) { log.log(LogLevel.INFO, "Will delete file references not in use: " + fileReferencesToDelete); fileReferencesToDelete.forEach(fileReference -> { File file = new File(fileReferencesPath, fileReference); if ( ! IOUtils.recursiveDeleteDir(file)) log.log(LogLevel.WARNING, "Could not delete " + file.getAbsolutePath()); }); } return fileReferencesToDelete; }
class ApplicationRepository implements com.yahoo.config.provision.Deployer { private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName()); private final TenantRepository tenantRepository; private final Optional<Provisioner> hostProvisioner; private final ApplicationConvergenceChecker convergeChecker; private final HttpProxy httpProxy; private final Clock clock; private final DeployLogger logger = new SilentDeployLogger(); private final ConfigserverConfig configserverConfig; private final Environment environment; private final FileDistributionStatus fileDistributionStatus; @Inject public ApplicationRepository(TenantRepository tenantRepository, HostProvisionerProvider hostProvisionerProvider, ApplicationConvergenceChecker applicationConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig) { this(tenantRepository, hostProvisionerProvider.getHostProvisioner(), applicationConvergenceChecker, httpProxy, configserverConfig, Clock.systemUTC(), new FileDistributionStatus()); } public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner, Clock clock) { this(tenantRepository, Optional.of(hostProvisioner), new ApplicationConvergenceChecker(), new HttpProxy(new SimpleHttpFetcher()), new ConfigserverConfig(new ConfigserverConfig.Builder()), clock, new FileDistributionStatus()); } private ApplicationRepository(TenantRepository tenantRepository, Optional<Provisioner> hostProvisioner, ApplicationConvergenceChecker applicationConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig, Clock clock, FileDistributionStatus fileDistributionStatus) { this.tenantRepository = tenantRepository; this.hostProvisioner = hostProvisioner; this.convergeChecker = applicationConvergenceChecker; this.httpProxy = httpProxy; this.clock = clock; this.configserverConfig = configserverConfig; this.environment = Environment.from(configserverConfig.environment()); this.fileDistributionStatus = 
fileDistributionStatus; } public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) { validateThatLocalSessionIsNotActive(tenant, sessionId); LocalSession session = getLocalSession(tenant, sessionId); ApplicationId applicationId = prepareParams.getApplicationId(); Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId); Slime deployLog = createDeployLog(); DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId); ConfigChangeActions actions = session.prepare(logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now); logConfigChangeActions(actions, logger); log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. "); return new PrepareResult(sessionId, actions, deployLog); } public PrepareResult prepareAndActivate(Tenant tenant, long sessionId, PrepareParams prepareParams, boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) { PrepareResult result = prepare(tenant, sessionId, prepareParams, now); activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreLockFailure, ignoreSessionStaleFailure); return result; } public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams) { return deploy(in, prepareParams, false, false, clock.instant()); } public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams, boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) { File tempDir = Files.createTempDir(); PrepareResult prepareResult; try { prepareResult = deploy(decompressApplication(in, tempDir), prepareParams, ignoreLockFailure, ignoreSessionStaleFailure, now); } finally { cleanupTempDirectory(tempDir); } return prepareResult; } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) { return 
deploy(applicationPackage, prepareParams, false, false, Instant.now()); } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); return prepareAndActivate(tenant, sessionId, prepareParams, ignoreLockFailure, ignoreSessionStaleFailure, now); } /** * Creates a new deployment from the active application, if available. * * @param application the active application to be redeployed * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) { return deployFromLocalActive(application, Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5))); } /** * Creates a new deployment from the active application, if available. 
* * @param application the active application to be redeployed * @param timeout the timeout to use for each individual deployment operation * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, Duration timeout) { Tenant tenant = tenantRepository.getTenant(application.tenant()); if (tenant == null) return Optional.empty(); LocalSession activeSession = getActiveSession(tenant, application); if (activeSession == null) return Optional.empty(); TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); LocalSession newSession = tenant.getSessionFactory().createSessionFromExisting(activeSession, logger, timeoutBudget); tenant.getLocalSessionRepo().addSession(newSession); Version version = decideVersion(application, environment, newSession.getVespaVersion()); return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock, false /* don't validate as this is already deployed */, version)); } public ApplicationId activate(Tenant tenant, long sessionId, TimeoutBudget timeoutBudget, boolean ignoreLockFailure, boolean ignoreSessionStaleFailure) { LocalSession localSession = getLocalSession(tenant, sessionId); Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft()); deployment.setIgnoreLockFailure(ignoreLockFailure); deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure); deployment.activate(); return localSession.getApplicationId(); } private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) { return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock); } /** * Removes a previously deployed application * * @return true if the application was found and 
removed, false if it was not present * @throws RuntimeException if the remove transaction fails. This method is exception safe. */ public boolean remove(ApplicationId applicationId) { Optional<Tenant> owner = Optional.ofNullable(tenantRepository.getTenant(applicationId.tenant())); if ( ! owner.isPresent()) return false; TenantApplications tenantApplications = owner.get().getApplicationRepo(); if ( ! tenantApplications.listApplications().contains(applicationId)) return false; long sessionId = tenantApplications.getSessionIdForApplication(applicationId); LocalSessionRepo localSessionRepo = owner.get().getLocalSessionRepo(); LocalSession session = localSessionRepo.getSession(sessionId); if (session == null) return false; NestedTransaction transaction = new NestedTransaction(); localSessionRepo.removeSession(session.getSessionId(), transaction); session.delete(transaction); transaction.add(new Rotations(owner.get().getCurator(), owner.get().getPath()).delete(applicationId)); transaction.add(tenantApplications.deleteApplication(applicationId)); hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId)); transaction.onCommitted(() -> log.log(LogLevel.INFO, "Deleted " + applicationId)); transaction.commit(); return true; } public HttpResponse clusterControllerStatusPage(ApplicationId applicationId, String hostName, String pathSuffix) { String relativePath = "clustercontroller-status/" + pathSuffix; return httpProxy.get(getApplication(applicationId), hostName, "container-clustercontroller", relativePath); } public Long getApplicationGeneration(ApplicationId applicationId) { return getApplication(applicationId).getApplicationGeneration(); } public void restart(ApplicationId applicationId, HostFilter hostFilter) { hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter)); } public HttpResponse filedistributionStatus(ApplicationId applicationId, Duration timeout) { return 
fileDistributionStatus.status(getApplication(applicationId), timeout); } public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) { Tenant tenant = tenantRepository.getTenant(tenantName); return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode); } private Application getApplication(ApplicationId applicationId) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); long sessionId = getSessionIdForApplication(tenant, applicationId); RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId, 0); return session.ensureApplicationLoaded().getForVersionOrLatest(Optional.empty(), clock.instant()); } private Set<ApplicationId> listApplications() { return tenantRepository.getAllTenants().stream() .flatMap(tenant -> tenant.getApplicationRepo().listApplications().stream()) .collect(Collectors.toSet()); } public HttpResponse serviceConvergenceCheck(ApplicationId applicationId, String hostname, URI uri) { return convergeChecker.serviceConvergenceCheck(getApplication(applicationId), hostname, uri); } public HttpResponse serviceListToCheckForConfigConvergence(ApplicationId applicationId, URI uri) { return convergeChecker.serviceListToCheckForConfigConvergence(getApplication(applicationId), uri); } /** * Gets the active Session for the given application id. * * @return the active session, or null if there is no active session for the given application id. 
*/ public LocalSession getActiveSession(ApplicationId applicationId) { return getActiveSession(tenantRepository.getTenant(applicationId.tenant()), applicationId); } public long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) { return tenant.getApplicationRepo().getSessionIdForApplication(applicationId); } public void validateThatRemoteSessionIsNotActive(Tenant tenant, long sessionId) { RemoteSession session = getRemoteSession(tenant, sessionId); if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } public void validateThatRemoteSessionIsPrepared(Tenant tenant, long sessionId) { RemoteSession session = getRemoteSession(tenant, sessionId); if (!Session.Status.PREPARE.equals(session.getStatus())) throw new IllegalStateException("Session not prepared: " + sessionId); } public long createSessionFromExisting(ApplicationId applicationId, DeployLogger logger, TimeoutBudget timeoutBudget) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo(); SessionFactory sessionFactory = tenant.getSessionFactory(); LocalSession fromSession = getExistingSession(tenant, applicationId); LocalSession session = sessionFactory.createSessionFromExisting(fromSession, logger, timeoutBudget); localSessionRepo.addSession(session); return session.getSessionId(); } public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) { File tempDir = Files.createTempDir(); long sessionId; try { sessionId = createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir)); } finally { cleanupTempDirectory(tempDir); } return sessionId; } public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); LocalSessionRepo localSessionRepo 
= tenant.getLocalSessionRepo(); SessionFactory sessionFactory = tenant.getSessionFactory(); LocalSession session = sessionFactory.createSession(applicationDirectory, applicationId, timeoutBudget); localSessionRepo.addSession(session); return session.getSessionId(); } public Set<TenantName> removeUnusedTenants() { Set<TenantName> tenantsToBeDeleted = tenantRepository.getAllTenantNames().stream() .filter(tenantName -> activeApplications(tenantName).isEmpty()) .filter(tenantName -> !tenantName.equals(TenantName.defaultName())) .filter(tenantName -> !tenantName.equals(TenantRepository.HOSTED_VESPA_TENANT)) .collect(Collectors.toSet()); tenantsToBeDeleted.forEach(tenantRepository::deleteTenant); return tenantsToBeDeleted; } public void deleteTenant(TenantName tenantName) { List<ApplicationId> activeApplications = activeApplications(tenantName); if (activeApplications.isEmpty()) tenantRepository.deleteTenant(tenantName); else throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: " + activeApplications); } private List<ApplicationId> activeApplications(TenantName tenantName) { return tenantRepository.getTenant(tenantName).getApplicationRepo().listApplications(); } public Tenant verifyTenantAndApplication(ApplicationId applicationId) { TenantName tenantName = applicationId.tenant(); if (!tenantRepository.checkThatTenantExists(tenantName)) { throw new IllegalArgumentException("Tenant " + tenantName + " was not found."); } Tenant tenant = tenantRepository.getTenant(tenantName); List<ApplicationId> applicationIds = listApplicationIds(tenant); if (!applicationIds.contains(applicationId)) { throw new IllegalArgumentException("No such application id: " + applicationId); } return tenant; } public ApplicationMetaData getMetadataFromSession(Tenant tenant, long sessionId) { return getLocalSession(tenant, sessionId).getMetaData(); } private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) { LocalSession 
session = getLocalSession(tenant, sessionId); if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } private LocalSession getLocalSession(Tenant tenant, long sessionId) { LocalSession session = tenant.getLocalSessionRepo().getSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } private RemoteSession getRemoteSession(Tenant tenant, long sessionId) { RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } private Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) { Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty(); TenantApplications applicationRepo = tenant.getApplicationRepo(); try { long currentActiveSessionId = applicationRepo.getSessionIdForApplication(appId); RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId); if (currentActiveSession != null) { currentActiveApplicationSet = Optional.ofNullable(currentActiveSession.ensureApplicationLoaded()); } } catch (IllegalArgumentException e) { } return currentActiveApplicationSet; } private File decompressApplication(InputStream in, String contentType, File tempDir) { try (CompressedApplicationInputStream application = CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) { return decompressApplication(application, tempDir); } catch (IOException e) { throw new IllegalArgumentException("Unable to decompress data in body", e); } } private File decompressApplication(CompressedApplicationInputStream in, File tempDir) { try { return in.decompress(tempDir); } catch (IOException e) { throw new IllegalArgumentException("Unable to decompress stream", e); } } private List<ApplicationId> listApplicationIds(Tenant tenant) { 
TenantApplications applicationRepo = tenant.getApplicationRepo(); return applicationRepo.listApplications(); } private void cleanupTempDirectory(File tempDir) { logger.log(LogLevel.DEBUG, "Deleting tmp dir '" + tempDir + "'"); if (!IOUtils.recursiveDeleteDir(tempDir)) { logger.log(LogLevel.WARNING, "Not able to delete tmp dir '" + tempDir + "'"); } } void redeployAllApplications() throws InterruptedException { ExecutorService executor = Executors.newFixedThreadPool(configserverConfig.numParallelTenantLoaders(), new DaemonThreadFactory("redeploy apps")); Map<ApplicationId, Future<?>> futures = new HashMap<>(); tenantRepository.getAllTenants() .forEach(tenant -> listApplicationIds(tenant) .forEach(appId -> deployFromLocalActive(appId).ifPresent( deployment -> futures.put(appId,executor.submit(deployment::activate))))); for (Map.Entry<ApplicationId, Future<?>> f : futures.entrySet()) { try { f.getValue().get(); } catch (ExecutionException e) { throw new RuntimeException("Redeploying of " + f.getKey() + " failed", e); } } executor.shutdown(); executor.awaitTermination(365, TimeUnit.DAYS); } private LocalSession getExistingSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); return getLocalSession(tenant, applicationRepo.getSessionIdForApplication(applicationId)); } private LocalSession getActiveSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); if (applicationRepo.listApplications().contains(applicationId)) { return tenant.getLocalSessionRepo().getSession(applicationRepo.getSessionIdForApplication(applicationId)); } return null; } private static void logConfigChangeActions(ConfigChangeActions actions, DeployLogger logger) { RestartActions restartActions = actions.getRestartActions(); if ( ! 
restartActions.isEmpty()) { logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" + restartActions.format()); } RefeedActions refeedActions = actions.getRefeedActions(); if ( ! refeedActions.isEmpty()) { boolean allAllowed = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed); logger.log(allAllowed ? Level.INFO : Level.WARNING, "Change(s) between active and new application that may require re-feed:\n" + refeedActions.format()); } } /** Returns version to use when deploying application in given environment */ static Version decideVersion(ApplicationId application, Environment environment, Version targetVersion) { if (environment.isManuallyDeployed() && !"hosted-vespa".equals(application.tenant().value())) { return Vtag.currentVersion; } return targetVersion; } public Slime createDeployLog() { Slime deployLog = new Slime(); deployLog.setObject(); return deployLog; } }
class ApplicationRepository implements com.yahoo.config.provision.Deployer { private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName()); private final TenantRepository tenantRepository; private final Optional<Provisioner> hostProvisioner; private final ApplicationConvergenceChecker convergeChecker; private final HttpProxy httpProxy; private final Clock clock; private final DeployLogger logger = new SilentDeployLogger(); private final ConfigserverConfig configserverConfig; private final Environment environment; private final FileDistributionStatus fileDistributionStatus; @Inject public ApplicationRepository(TenantRepository tenantRepository, HostProvisionerProvider hostProvisionerProvider, ApplicationConvergenceChecker applicationConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig) { this(tenantRepository, hostProvisionerProvider.getHostProvisioner(), applicationConvergenceChecker, httpProxy, configserverConfig, Clock.systemUTC(), new FileDistributionStatus()); } public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner, Clock clock) { this(tenantRepository, Optional.of(hostProvisioner), new ApplicationConvergenceChecker(), new HttpProxy(new SimpleHttpFetcher()), new ConfigserverConfig(new ConfigserverConfig.Builder()), clock, new FileDistributionStatus()); } private ApplicationRepository(TenantRepository tenantRepository, Optional<Provisioner> hostProvisioner, ApplicationConvergenceChecker applicationConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig, Clock clock, FileDistributionStatus fileDistributionStatus) { this.tenantRepository = tenantRepository; this.hostProvisioner = hostProvisioner; this.convergeChecker = applicationConvergenceChecker; this.httpProxy = httpProxy; this.clock = clock; this.configserverConfig = configserverConfig; this.environment = Environment.from(configserverConfig.environment()); this.fileDistributionStatus = 
fileDistributionStatus; } public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) { validateThatLocalSessionIsNotActive(tenant, sessionId); LocalSession session = getLocalSession(tenant, sessionId); ApplicationId applicationId = prepareParams.getApplicationId(); Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId); Slime deployLog = createDeployLog(); DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId); ConfigChangeActions actions = session.prepare(logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now); logConfigChangeActions(actions, logger); log.log(LogLevel.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. "); return new PrepareResult(sessionId, actions, deployLog); } public PrepareResult prepareAndActivate(Tenant tenant, long sessionId, PrepareParams prepareParams, boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) { PrepareResult result = prepare(tenant, sessionId, prepareParams, now); activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreLockFailure, ignoreSessionStaleFailure); return result; } public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams) { return deploy(in, prepareParams, false, false, clock.instant()); } public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams, boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) { File tempDir = Files.createTempDir(); PrepareResult prepareResult; try { prepareResult = deploy(decompressApplication(in, tempDir), prepareParams, ignoreLockFailure, ignoreSessionStaleFailure, now); } finally { cleanupTempDirectory(tempDir); } return prepareResult; } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) { return 
deploy(applicationPackage, prepareParams, false, false, Instant.now()); } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, boolean ignoreLockFailure, boolean ignoreSessionStaleFailure, Instant now) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); return prepareAndActivate(tenant, sessionId, prepareParams, ignoreLockFailure, ignoreSessionStaleFailure, now); } /** * Creates a new deployment from the active application, if available. * * @param application the active application to be redeployed * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) { return deployFromLocalActive(application, Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5))); } /** * Creates a new deployment from the active application, if available. 
* * @param application the active application to be redeployed * @param timeout the timeout to use for each individual deployment operation * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, Duration timeout) { Tenant tenant = tenantRepository.getTenant(application.tenant()); if (tenant == null) return Optional.empty(); LocalSession activeSession = getActiveSession(tenant, application); if (activeSession == null) return Optional.empty(); TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); LocalSession newSession = tenant.getSessionFactory().createSessionFromExisting(activeSession, logger, timeoutBudget); tenant.getLocalSessionRepo().addSession(newSession); Version version = decideVersion(application, environment, newSession.getVespaVersion()); return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock, false /* don't validate as this is already deployed */, version)); } public ApplicationId activate(Tenant tenant, long sessionId, TimeoutBudget timeoutBudget, boolean ignoreLockFailure, boolean ignoreSessionStaleFailure) { LocalSession localSession = getLocalSession(tenant, sessionId); Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft()); deployment.setIgnoreLockFailure(ignoreLockFailure); deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure); deployment.activate(); return localSession.getApplicationId(); } private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) { return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock); } /** * Removes a previously deployed application * * @return true if the application was found and 
removed, false if it was not present * @throws RuntimeException if the remove transaction fails. This method is exception safe. */ public boolean remove(ApplicationId applicationId) { Optional<Tenant> owner = Optional.ofNullable(tenantRepository.getTenant(applicationId.tenant())); if ( ! owner.isPresent()) return false; TenantApplications tenantApplications = owner.get().getApplicationRepo(); if ( ! tenantApplications.listApplications().contains(applicationId)) return false; long sessionId = tenantApplications.getSessionIdForApplication(applicationId); LocalSessionRepo localSessionRepo = owner.get().getLocalSessionRepo(); LocalSession session = localSessionRepo.getSession(sessionId); if (session == null) return false; NestedTransaction transaction = new NestedTransaction(); localSessionRepo.removeSession(session.getSessionId(), transaction); session.delete(transaction); transaction.add(new Rotations(owner.get().getCurator(), owner.get().getPath()).delete(applicationId)); transaction.add(tenantApplications.deleteApplication(applicationId)); hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId)); transaction.onCommitted(() -> log.log(LogLevel.INFO, "Deleted " + applicationId)); transaction.commit(); return true; } public HttpResponse clusterControllerStatusPage(ApplicationId applicationId, String hostName, String pathSuffix) { String relativePath = "clustercontroller-status/" + pathSuffix; return httpProxy.get(getApplication(applicationId), hostName, "container-clustercontroller", relativePath); } public Long getApplicationGeneration(ApplicationId applicationId) { return getApplication(applicationId).getApplicationGeneration(); } public void restart(ApplicationId applicationId, HostFilter hostFilter) { hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter)); } public HttpResponse filedistributionStatus(ApplicationId applicationId, Duration timeout) { return 
fileDistributionStatus.status(getApplication(applicationId), timeout); } public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) { Tenant tenant = tenantRepository.getTenant(tenantName); return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode); } private Application getApplication(ApplicationId applicationId) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); long sessionId = getSessionIdForApplication(tenant, applicationId); RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId, 0); return session.ensureApplicationLoaded().getForVersionOrLatest(Optional.empty(), clock.instant()); } private Set<ApplicationId> listApplications() { return tenantRepository.getAllTenants().stream() .flatMap(tenant -> tenant.getApplicationRepo().listApplications().stream()) .collect(Collectors.toSet()); } public HttpResponse serviceConvergenceCheck(ApplicationId applicationId, String hostname, URI uri) { return convergeChecker.serviceConvergenceCheck(getApplication(applicationId), hostname, uri); } public HttpResponse serviceListToCheckForConfigConvergence(ApplicationId applicationId, URI uri) { return convergeChecker.serviceListToCheckForConfigConvergence(getApplication(applicationId), uri); } /** * Gets the active Session for the given application id. * * @return the active session, or null if there is no active session for the given application id. 
*/ public LocalSession getActiveSession(ApplicationId applicationId) { return getActiveSession(tenantRepository.getTenant(applicationId.tenant()), applicationId); } public long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) { return tenant.getApplicationRepo().getSessionIdForApplication(applicationId); } public void validateThatRemoteSessionIsNotActive(Tenant tenant, long sessionId) { RemoteSession session = getRemoteSession(tenant, sessionId); if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } public void validateThatRemoteSessionIsPrepared(Tenant tenant, long sessionId) { RemoteSession session = getRemoteSession(tenant, sessionId); if (!Session.Status.PREPARE.equals(session.getStatus())) throw new IllegalStateException("Session not prepared: " + sessionId); } public long createSessionFromExisting(ApplicationId applicationId, DeployLogger logger, TimeoutBudget timeoutBudget) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); LocalSessionRepo localSessionRepo = tenant.getLocalSessionRepo(); SessionFactory sessionFactory = tenant.getSessionFactory(); LocalSession fromSession = getExistingSession(tenant, applicationId); LocalSession session = sessionFactory.createSessionFromExisting(fromSession, logger, timeoutBudget); localSessionRepo.addSession(session); return session.getSessionId(); } public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) { File tempDir = Files.createTempDir(); long sessionId; try { sessionId = createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir)); } finally { cleanupTempDirectory(tempDir); } return sessionId; } public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) { Tenant tenant = tenantRepository.getTenant(applicationId.tenant()); LocalSessionRepo localSessionRepo 
= tenant.getLocalSessionRepo(); SessionFactory sessionFactory = tenant.getSessionFactory(); LocalSession session = sessionFactory.createSession(applicationDirectory, applicationId, timeoutBudget); localSessionRepo.addSession(session); return session.getSessionId(); } public Set<TenantName> removeUnusedTenants() { Set<TenantName> tenantsToBeDeleted = tenantRepository.getAllTenantNames().stream() .filter(tenantName -> activeApplications(tenantName).isEmpty()) .filter(tenantName -> !tenantName.equals(TenantName.defaultName())) .filter(tenantName -> !tenantName.equals(TenantRepository.HOSTED_VESPA_TENANT)) .collect(Collectors.toSet()); tenantsToBeDeleted.forEach(tenantRepository::deleteTenant); return tenantsToBeDeleted; } public void deleteTenant(TenantName tenantName) { List<ApplicationId> activeApplications = activeApplications(tenantName); if (activeApplications.isEmpty()) tenantRepository.deleteTenant(tenantName); else throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: " + activeApplications); } private List<ApplicationId> activeApplications(TenantName tenantName) { return tenantRepository.getTenant(tenantName).getApplicationRepo().listApplications(); } public Tenant verifyTenantAndApplication(ApplicationId applicationId) { TenantName tenantName = applicationId.tenant(); if (!tenantRepository.checkThatTenantExists(tenantName)) { throw new IllegalArgumentException("Tenant " + tenantName + " was not found."); } Tenant tenant = tenantRepository.getTenant(tenantName); List<ApplicationId> applicationIds = listApplicationIds(tenant); if (!applicationIds.contains(applicationId)) { throw new IllegalArgumentException("No such application id: " + applicationId); } return tenant; } public ApplicationMetaData getMetadataFromSession(Tenant tenant, long sessionId) { return getLocalSession(tenant, sessionId).getMetaData(); } private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) { LocalSession 
session = getLocalSession(tenant, sessionId); if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } private LocalSession getLocalSession(Tenant tenant, long sessionId) { LocalSession session = tenant.getLocalSessionRepo().getSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } private RemoteSession getRemoteSession(Tenant tenant, long sessionId) { RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } private Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) { Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty(); TenantApplications applicationRepo = tenant.getApplicationRepo(); try { long currentActiveSessionId = applicationRepo.getSessionIdForApplication(appId); RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId); if (currentActiveSession != null) { currentActiveApplicationSet = Optional.ofNullable(currentActiveSession.ensureApplicationLoaded()); } } catch (IllegalArgumentException e) { } return currentActiveApplicationSet; } private File decompressApplication(InputStream in, String contentType, File tempDir) { try (CompressedApplicationInputStream application = CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) { return decompressApplication(application, tempDir); } catch (IOException e) { throw new IllegalArgumentException("Unable to decompress data in body", e); } } private File decompressApplication(CompressedApplicationInputStream in, File tempDir) { try { return in.decompress(tempDir); } catch (IOException e) { throw new IllegalArgumentException("Unable to decompress stream", e); } } private List<ApplicationId> listApplicationIds(Tenant tenant) { 
TenantApplications applicationRepo = tenant.getApplicationRepo(); return applicationRepo.listApplications(); } private void cleanupTempDirectory(File tempDir) { logger.log(LogLevel.DEBUG, "Deleting tmp dir '" + tempDir + "'"); if (!IOUtils.recursiveDeleteDir(tempDir)) { logger.log(LogLevel.WARNING, "Not able to delete tmp dir '" + tempDir + "'"); } } void redeployAllApplications() throws InterruptedException { ExecutorService executor = Executors.newFixedThreadPool(configserverConfig.numParallelTenantLoaders(), new DaemonThreadFactory("redeploy apps")); Map<ApplicationId, Future<?>> futures = new HashMap<>(); tenantRepository.getAllTenants() .forEach(tenant -> listApplicationIds(tenant) .forEach(appId -> deployFromLocalActive(appId).ifPresent( deployment -> futures.put(appId,executor.submit(deployment::activate))))); for (Map.Entry<ApplicationId, Future<?>> f : futures.entrySet()) { try { f.getValue().get(); } catch (ExecutionException e) { throw new RuntimeException("Redeploying of " + f.getKey() + " failed", e); } } executor.shutdown(); executor.awaitTermination(365, TimeUnit.DAYS); } private LocalSession getExistingSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); return getLocalSession(tenant, applicationRepo.getSessionIdForApplication(applicationId)); } private LocalSession getActiveSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); if (applicationRepo.listApplications().contains(applicationId)) { return tenant.getLocalSessionRepo().getSession(applicationRepo.getSessionIdForApplication(applicationId)); } return null; } private static void logConfigChangeActions(ConfigChangeActions actions, DeployLogger logger) { RestartActions restartActions = actions.getRestartActions(); if ( ! 
restartActions.isEmpty()) { logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" + restartActions.format()); } RefeedActions refeedActions = actions.getRefeedActions(); if ( ! refeedActions.isEmpty()) { boolean allAllowed = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed); logger.log(allAllowed ? Level.INFO : Level.WARNING, "Change(s) between active and new application that may require re-feed:\n" + refeedActions.format()); } } /** Returns version to use when deploying application in given environment */ static Version decideVersion(ApplicationId application, Environment environment, Version targetVersion) { if (environment.isManuallyDeployed() && !"hosted-vespa".equals(application.tenant().value())) { return Vtag.currentVersion; } return targetVersion; } public Slime createDeployLog() { Slime deployLog = new Slime(); deployLog.setObject(); return deployLog; } }
If this test fails before deletion (e.g., due to a failing assert), the directory will be left on disk. This is likely the case for all uses of `Files.createTempDir()` below. Consider using something like ``` @Rule public TemporaryFolder folder = new TemporaryFolder(); ```
public void deleteUnusedFileReferences() { File fileReferencesDir = Files.createTempDir(); File filereferenceDir = new File(fileReferencesDir, "foo"); assertTrue(filereferenceDir.mkdir()); IOUtils.writeFile(new File(filereferenceDir, "bar"), Utf8.toBytes("test")); tenantRepository.addTenant(tenantName); tenant = tenantRepository.getTenant(tenantName); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); PrepareParams prepareParams = new PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build(); deployApp(new File("src/test/apps/app"), prepareParams); boolean deleteFiles = false; Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir, deleteFiles); assertEquals(new HashSet<>(Collections.singletonList("foo")), toBeDeleted); assertTrue(filereferenceDir.exists()); deleteFiles = true; toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir, deleteFiles); assertEquals(new HashSet<>(Collections.singletonList("foo")), toBeDeleted); assertFalse(filereferenceDir.exists()); }
File fileReferencesDir = Files.createTempDir();
public void deleteUnusedFileReferences() throws IOException { File fileReferencesDir = temporaryFolder.newFolder(); File filereferenceDir = new File(fileReferencesDir, "foo"); assertTrue(filereferenceDir.mkdir()); IOUtils.writeFile(new File(filereferenceDir, "bar"), Utf8.toBytes("test")); tenantRepository.addTenant(tenantName); tenant = tenantRepository.getTenant(tenantName); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); PrepareParams prepareParams = new PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build(); deployApp(new File("src/test/apps/app"), prepareParams); boolean deleteFiles = false; Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir, deleteFiles); assertEquals(Collections.singleton("foo"), toBeDeleted); assertTrue(filereferenceDir.exists()); deleteFiles = true; toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir, deleteFiles); assertEquals(Collections.singleton("foo"), toBeDeleted); assertFalse(filereferenceDir.exists()); }
class ApplicationRepositoryTest { private final static File testApp = new File("src/test/apps/app"); private final static File testAppJdiscOnly = new File("src/test/apps/app-jdisc-only"); private final static File testAppJdiscOnlyRestart = new File("src/test/apps/app-jdisc-only-restart"); private final static TenantName tenantName = TenantName.from("test"); private final static Clock clock = Clock.systemUTC(); private Tenant tenant; private ApplicationRepository applicationRepository; private TenantRepository tenantRepository; private TimeoutBudget timeoutBudget; @Before public void setup() { Curator curator = new MockCurator(); tenantRepository = new TenantRepository(new TestComponentRegistry.Builder() .curator(curator) .build()); tenantRepository.addTenant(tenantName); tenant = tenantRepository.getTenant(tenantName); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); } @Test public void prepareAndActivate() throws IOException { PrepareResult result = prepareAndActivateApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void prepareAndActivateWithRestart() throws IOException { prepareAndActivateApp(testAppJdiscOnly); PrepareResult result = prepareAndActivateApp(testAppJdiscOnlyRestart); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertFalse(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void createAndPrepareAndActivate() { PrepareResult result = deployApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void deleteUnusedTenants() { deployApp(testApp); 
assertTrue(applicationRepository.removeUnusedTenants().isEmpty()); applicationRepository.remove(applicationId()); assertEquals(tenantName, applicationRepository.removeUnusedTenants().iterator().next()); } @Test public void decideVersion() { ApplicationId regularApp = ApplicationId.from("tenant1", "application1", "default"); ApplicationId systemApp = ApplicationId.from("hosted-vespa", "routing", "default"); Version targetVersion = Version.fromString("5.0"); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.prod, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.dev, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.perf, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(regularApp, Environment.prod, targetVersion)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, targetVersion)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.perf, targetVersion)); } @Test private PrepareResult prepareAndActivateApp(File application) throws IOException { FilesApplicationPackage appDir = FilesApplicationPackage.fromFile(application); long sessionId = applicationRepository.createSession(applicationId(), timeoutBudget, appDir.getAppDir()); return applicationRepository.prepareAndActivate(tenant, sessionId, prepareParams(), false, false, Instant.now()); } private PrepareResult deployApp(File applicationPackage) { return deployApp(applicationPackage, prepareParams()); } private PrepareResult deployApp(File applicationPackage, PrepareParams prepareParams) { return applicationRepository.deploy(applicationPackage, prepareParams); } private PrepareParams prepareParams() { return new PrepareParams.Builder().applicationId(applicationId()).build(); } private ApplicationId applicationId() { return ApplicationId.from(tenantName, 
ApplicationName.from("testapp"), InstanceName.defaultName()); } }
class ApplicationRepositoryTest { private final static File testApp = new File("src/test/apps/app"); private final static File testAppJdiscOnly = new File("src/test/apps/app-jdisc-only"); private final static File testAppJdiscOnlyRestart = new File("src/test/apps/app-jdisc-only-restart"); private final static TenantName tenantName = TenantName.from("test"); private final static Clock clock = Clock.systemUTC(); private Tenant tenant; private ApplicationRepository applicationRepository; private TenantRepository tenantRepository; private TimeoutBudget timeoutBudget; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() { Curator curator = new MockCurator(); tenantRepository = new TenantRepository(new TestComponentRegistry.Builder() .curator(curator) .build()); tenantRepository.addTenant(tenantName); tenant = tenantRepository.getTenant(tenantName); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); } @Test public void prepareAndActivate() throws IOException { PrepareResult result = prepareAndActivateApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void prepareAndActivateWithRestart() throws IOException { prepareAndActivateApp(testAppJdiscOnly); PrepareResult result = prepareAndActivateApp(testAppJdiscOnlyRestart); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertFalse(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void createAndPrepareAndActivate() { PrepareResult result = deployApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void deleteUnusedTenants() { 
deployApp(testApp); assertTrue(applicationRepository.removeUnusedTenants().isEmpty()); applicationRepository.remove(applicationId()); assertEquals(tenantName, applicationRepository.removeUnusedTenants().iterator().next()); } @Test public void decideVersion() { ApplicationId regularApp = ApplicationId.from("tenant1", "application1", "default"); ApplicationId systemApp = ApplicationId.from("hosted-vespa", "routing", "default"); Version targetVersion = Version.fromString("5.0"); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.prod, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.dev, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.perf, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(regularApp, Environment.prod, targetVersion)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, targetVersion)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.perf, targetVersion)); } @Test private PrepareResult prepareAndActivateApp(File application) throws IOException { FilesApplicationPackage appDir = FilesApplicationPackage.fromFile(application); long sessionId = applicationRepository.createSession(applicationId(), timeoutBudget, appDir.getAppDir()); return applicationRepository.prepareAndActivate(tenant, sessionId, prepareParams(), false, false, Instant.now()); } private PrepareResult deployApp(File applicationPackage) { return deployApp(applicationPackage, prepareParams()); } private PrepareResult deployApp(File applicationPackage, PrepareParams prepareParams) { return applicationRepository.deploy(applicationPackage, prepareParams); } private PrepareParams prepareParams() { return new PrepareParams.Builder().applicationId(applicationId()).build(); } private ApplicationId applicationId() { return 
ApplicationId.from(tenantName, ApplicationName.from("testapp"), InstanceName.defaultName()); } }
Can be simplified to `Collections.singleton("foo")`.
public void deleteUnusedFileReferences() { File fileReferencesDir = Files.createTempDir(); File filereferenceDir = new File(fileReferencesDir, "foo"); assertTrue(filereferenceDir.mkdir()); IOUtils.writeFile(new File(filereferenceDir, "bar"), Utf8.toBytes("test")); tenantRepository.addTenant(tenantName); tenant = tenantRepository.getTenant(tenantName); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); PrepareParams prepareParams = new PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build(); deployApp(new File("src/test/apps/app"), prepareParams); boolean deleteFiles = false; Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir, deleteFiles); assertEquals(new HashSet<>(Collections.singletonList("foo")), toBeDeleted); assertTrue(filereferenceDir.exists()); deleteFiles = true; toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir, deleteFiles); assertEquals(new HashSet<>(Collections.singletonList("foo")), toBeDeleted); assertFalse(filereferenceDir.exists()); }
assertEquals(new HashSet<>(Collections.singletonList("foo")), toBeDeleted);
public void deleteUnusedFileReferences() throws IOException { File fileReferencesDir = temporaryFolder.newFolder(); File filereferenceDir = new File(fileReferencesDir, "foo"); assertTrue(filereferenceDir.mkdir()); IOUtils.writeFile(new File(filereferenceDir, "bar"), Utf8.toBytes("test")); tenantRepository.addTenant(tenantName); tenant = tenantRepository.getTenant(tenantName); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); PrepareParams prepareParams = new PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build(); deployApp(new File("src/test/apps/app"), prepareParams); boolean deleteFiles = false; Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir, deleteFiles); assertEquals(Collections.singleton("foo"), toBeDeleted); assertTrue(filereferenceDir.exists()); deleteFiles = true; toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir, deleteFiles); assertEquals(Collections.singleton("foo"), toBeDeleted); assertFalse(filereferenceDir.exists()); }
class ApplicationRepositoryTest { private final static File testApp = new File("src/test/apps/app"); private final static File testAppJdiscOnly = new File("src/test/apps/app-jdisc-only"); private final static File testAppJdiscOnlyRestart = new File("src/test/apps/app-jdisc-only-restart"); private final static TenantName tenantName = TenantName.from("test"); private final static Clock clock = Clock.systemUTC(); private Tenant tenant; private ApplicationRepository applicationRepository; private TenantRepository tenantRepository; private TimeoutBudget timeoutBudget; @Before public void setup() { Curator curator = new MockCurator(); tenantRepository = new TenantRepository(new TestComponentRegistry.Builder() .curator(curator) .build()); tenantRepository.addTenant(tenantName); tenant = tenantRepository.getTenant(tenantName); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); } @Test public void prepareAndActivate() throws IOException { PrepareResult result = prepareAndActivateApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void prepareAndActivateWithRestart() throws IOException { prepareAndActivateApp(testAppJdiscOnly); PrepareResult result = prepareAndActivateApp(testAppJdiscOnlyRestart); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertFalse(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void createAndPrepareAndActivate() { PrepareResult result = deployApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void deleteUnusedTenants() { deployApp(testApp); 
assertTrue(applicationRepository.removeUnusedTenants().isEmpty()); applicationRepository.remove(applicationId()); assertEquals(tenantName, applicationRepository.removeUnusedTenants().iterator().next()); } @Test public void decideVersion() { ApplicationId regularApp = ApplicationId.from("tenant1", "application1", "default"); ApplicationId systemApp = ApplicationId.from("hosted-vespa", "routing", "default"); Version targetVersion = Version.fromString("5.0"); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.prod, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.dev, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.perf, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(regularApp, Environment.prod, targetVersion)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, targetVersion)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.perf, targetVersion)); } @Test private PrepareResult prepareAndActivateApp(File application) throws IOException { FilesApplicationPackage appDir = FilesApplicationPackage.fromFile(application); long sessionId = applicationRepository.createSession(applicationId(), timeoutBudget, appDir.getAppDir()); return applicationRepository.prepareAndActivate(tenant, sessionId, prepareParams(), false, false, Instant.now()); } private PrepareResult deployApp(File applicationPackage) { return deployApp(applicationPackage, prepareParams()); } private PrepareResult deployApp(File applicationPackage, PrepareParams prepareParams) { return applicationRepository.deploy(applicationPackage, prepareParams); } private PrepareParams prepareParams() { return new PrepareParams.Builder().applicationId(applicationId()).build(); } private ApplicationId applicationId() { return ApplicationId.from(tenantName, 
ApplicationName.from("testapp"), InstanceName.defaultName()); } }
class ApplicationRepositoryTest { private final static File testApp = new File("src/test/apps/app"); private final static File testAppJdiscOnly = new File("src/test/apps/app-jdisc-only"); private final static File testAppJdiscOnlyRestart = new File("src/test/apps/app-jdisc-only-restart"); private final static TenantName tenantName = TenantName.from("test"); private final static Clock clock = Clock.systemUTC(); private Tenant tenant; private ApplicationRepository applicationRepository; private TenantRepository tenantRepository; private TimeoutBudget timeoutBudget; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() { Curator curator = new MockCurator(); tenantRepository = new TenantRepository(new TestComponentRegistry.Builder() .curator(curator) .build()); tenantRepository.addTenant(tenantName); tenant = tenantRepository.getTenant(tenantName); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); } @Test public void prepareAndActivate() throws IOException { PrepareResult result = prepareAndActivateApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void prepareAndActivateWithRestart() throws IOException { prepareAndActivateApp(testAppJdiscOnly); PrepareResult result = prepareAndActivateApp(testAppJdiscOnlyRestart); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertFalse(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void createAndPrepareAndActivate() { PrepareResult result = deployApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void deleteUnusedTenants() { 
deployApp(testApp); assertTrue(applicationRepository.removeUnusedTenants().isEmpty()); applicationRepository.remove(applicationId()); assertEquals(tenantName, applicationRepository.removeUnusedTenants().iterator().next()); } @Test public void decideVersion() { ApplicationId regularApp = ApplicationId.from("tenant1", "application1", "default"); ApplicationId systemApp = ApplicationId.from("hosted-vespa", "routing", "default"); Version targetVersion = Version.fromString("5.0"); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.prod, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.dev, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.perf, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(regularApp, Environment.prod, targetVersion)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, targetVersion)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.perf, targetVersion)); } @Test private PrepareResult prepareAndActivateApp(File application) throws IOException { FilesApplicationPackage appDir = FilesApplicationPackage.fromFile(application); long sessionId = applicationRepository.createSession(applicationId(), timeoutBudget, appDir.getAppDir()); return applicationRepository.prepareAndActivate(tenant, sessionId, prepareParams(), false, false, Instant.now()); } private PrepareResult deployApp(File applicationPackage) { return deployApp(applicationPackage, prepareParams()); } private PrepareResult deployApp(File applicationPackage, PrepareParams prepareParams) { return applicationRepository.deploy(applicationPackage, prepareParams); } private PrepareParams prepareParams() { return new PrepareParams.Builder().applicationId(applicationId()).build(); } private ApplicationId applicationId() { return 
ApplicationId.from(tenantName, ApplicationName.from("testapp"), InstanceName.defaultName()); } }
Same as above.
public void deleteUnusedFileReferences() { File fileReferencesDir = Files.createTempDir(); File filereferenceDir = new File(fileReferencesDir, "foo"); assertTrue(filereferenceDir.mkdir()); IOUtils.writeFile(new File(filereferenceDir, "bar"), Utf8.toBytes("test")); tenantRepository.addTenant(tenantName); tenant = tenantRepository.getTenant(tenantName); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); PrepareParams prepareParams = new PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build(); deployApp(new File("src/test/apps/app"), prepareParams); boolean deleteFiles = false; Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir, deleteFiles); assertEquals(new HashSet<>(Collections.singletonList("foo")), toBeDeleted); assertTrue(filereferenceDir.exists()); deleteFiles = true; toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir, deleteFiles); assertEquals(new HashSet<>(Collections.singletonList("foo")), toBeDeleted); assertFalse(filereferenceDir.exists()); }
assertEquals(new HashSet<>(Collections.singletonList("foo")), toBeDeleted);
public void deleteUnusedFileReferences() throws IOException { File fileReferencesDir = temporaryFolder.newFolder(); File filereferenceDir = new File(fileReferencesDir, "foo"); assertTrue(filereferenceDir.mkdir()); IOUtils.writeFile(new File(filereferenceDir, "bar"), Utf8.toBytes("test")); tenantRepository.addTenant(tenantName); tenant = tenantRepository.getTenant(tenantName); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); PrepareParams prepareParams = new PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build(); deployApp(new File("src/test/apps/app"), prepareParams); boolean deleteFiles = false; Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir, deleteFiles); assertEquals(Collections.singleton("foo"), toBeDeleted); assertTrue(filereferenceDir.exists()); deleteFiles = true; toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir, deleteFiles); assertEquals(Collections.singleton("foo"), toBeDeleted); assertFalse(filereferenceDir.exists()); }
class ApplicationRepositoryTest { private final static File testApp = new File("src/test/apps/app"); private final static File testAppJdiscOnly = new File("src/test/apps/app-jdisc-only"); private final static File testAppJdiscOnlyRestart = new File("src/test/apps/app-jdisc-only-restart"); private final static TenantName tenantName = TenantName.from("test"); private final static Clock clock = Clock.systemUTC(); private Tenant tenant; private ApplicationRepository applicationRepository; private TenantRepository tenantRepository; private TimeoutBudget timeoutBudget; @Before public void setup() { Curator curator = new MockCurator(); tenantRepository = new TenantRepository(new TestComponentRegistry.Builder() .curator(curator) .build()); tenantRepository.addTenant(tenantName); tenant = tenantRepository.getTenant(tenantName); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); } @Test public void prepareAndActivate() throws IOException { PrepareResult result = prepareAndActivateApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void prepareAndActivateWithRestart() throws IOException { prepareAndActivateApp(testAppJdiscOnly); PrepareResult result = prepareAndActivateApp(testAppJdiscOnlyRestart); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertFalse(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void createAndPrepareAndActivate() { PrepareResult result = deployApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void deleteUnusedTenants() { deployApp(testApp); 
assertTrue(applicationRepository.removeUnusedTenants().isEmpty()); applicationRepository.remove(applicationId()); assertEquals(tenantName, applicationRepository.removeUnusedTenants().iterator().next()); } @Test public void decideVersion() { ApplicationId regularApp = ApplicationId.from("tenant1", "application1", "default"); ApplicationId systemApp = ApplicationId.from("hosted-vespa", "routing", "default"); Version targetVersion = Version.fromString("5.0"); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.prod, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.dev, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.perf, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(regularApp, Environment.prod, targetVersion)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, targetVersion)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.perf, targetVersion)); } @Test private PrepareResult prepareAndActivateApp(File application) throws IOException { FilesApplicationPackage appDir = FilesApplicationPackage.fromFile(application); long sessionId = applicationRepository.createSession(applicationId(), timeoutBudget, appDir.getAppDir()); return applicationRepository.prepareAndActivate(tenant, sessionId, prepareParams(), false, false, Instant.now()); } private PrepareResult deployApp(File applicationPackage) { return deployApp(applicationPackage, prepareParams()); } private PrepareResult deployApp(File applicationPackage, PrepareParams prepareParams) { return applicationRepository.deploy(applicationPackage, prepareParams); } private PrepareParams prepareParams() { return new PrepareParams.Builder().applicationId(applicationId()).build(); } private ApplicationId applicationId() { return ApplicationId.from(tenantName, 
ApplicationName.from("testapp"), InstanceName.defaultName()); } }
class ApplicationRepositoryTest { private final static File testApp = new File("src/test/apps/app"); private final static File testAppJdiscOnly = new File("src/test/apps/app-jdisc-only"); private final static File testAppJdiscOnlyRestart = new File("src/test/apps/app-jdisc-only-restart"); private final static TenantName tenantName = TenantName.from("test"); private final static Clock clock = Clock.systemUTC(); private Tenant tenant; private ApplicationRepository applicationRepository; private TenantRepository tenantRepository; private TimeoutBudget timeoutBudget; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() { Curator curator = new MockCurator(); tenantRepository = new TenantRepository(new TestComponentRegistry.Builder() .curator(curator) .build()); tenantRepository.addTenant(tenantName); tenant = tenantRepository.getTenant(tenantName); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); } @Test public void prepareAndActivate() throws IOException { PrepareResult result = prepareAndActivateApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void prepareAndActivateWithRestart() throws IOException { prepareAndActivateApp(testAppJdiscOnly); PrepareResult result = prepareAndActivateApp(testAppJdiscOnlyRestart); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertFalse(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void createAndPrepareAndActivate() { PrepareResult result = deployApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void deleteUnusedTenants() { 
deployApp(testApp); assertTrue(applicationRepository.removeUnusedTenants().isEmpty()); applicationRepository.remove(applicationId()); assertEquals(tenantName, applicationRepository.removeUnusedTenants().iterator().next()); } @Test public void decideVersion() { ApplicationId regularApp = ApplicationId.from("tenant1", "application1", "default"); ApplicationId systemApp = ApplicationId.from("hosted-vespa", "routing", "default"); Version targetVersion = Version.fromString("5.0"); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.prod, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.dev, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(systemApp, Environment.perf, targetVersion)); assertEquals(targetVersion, ApplicationRepository.decideVersion(regularApp, Environment.prod, targetVersion)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, targetVersion)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.perf, targetVersion)); } @Test private PrepareResult prepareAndActivateApp(File application) throws IOException { FilesApplicationPackage appDir = FilesApplicationPackage.fromFile(application); long sessionId = applicationRepository.createSession(applicationId(), timeoutBudget, appDir.getAppDir()); return applicationRepository.prepareAndActivate(tenant, sessionId, prepareParams(), false, false, Instant.now()); } private PrepareResult deployApp(File applicationPackage) { return deployApp(applicationPackage, prepareParams()); } private PrepareResult deployApp(File applicationPackage, PrepareParams prepareParams) { return applicationRepository.deploy(applicationPackage, prepareParams); } private PrepareParams prepareParams() { return new PrepareParams.Builder().applicationId(applicationId()).build(); } private ApplicationId applicationId() { return 
ApplicationId.from(tenantName, ApplicationName.from("testapp"), InstanceName.defaultName()); } }
Visitors operate on bucket granularities and `notfound` is a document-level metric, so this should never be nonzero. I.e. this can be removed.
private static Set<Metric> getStorageMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.add(new Metric("vds.datastored.alldisks.docs.average")); metrics.add(new Metric("vds.datastored.alldisks.bytes.average")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.max")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.sum")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.count")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.average")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.max")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.sum")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.count")); metrics.add(new 
Metric("vds.filestor.alldisks.averagequeuewait.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.max")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.sum")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.count")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.average")); metrics.add(new Metric("vds.visitor.allthreads.completed.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.completed.sum.rate")); metrics.add(new Metric("vds.visitor.allthreads.created.sum.rate")); metrics.add(new Metric("vds.visitor.allthreads.failed.sum.rate")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.failed.rate")); metrics.add(new 
Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.count")); metrics.add(new 
Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.splitbuckets.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.joinbuckets.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.setbucketstates.count.rate")); metrics.add(new Metric("vds.idealstate.buckets_rechecking.average")); metrics.add(new Metric("vds.idealstate.idealstate_diff.average")); metrics.add(new Metric("vds.idealstate.buckets_toofewcopies.average")); metrics.add(new Metric("vds.idealstate.buckets_toomanycopies.average")); metrics.add(new Metric("vds.idealstate.buckets.average")); metrics.add(new Metric("vds.idealstate.buckets_notrusted.average")); metrics.add(new Metric("vds.idealstate.delete_bucket.done_ok.rate")); metrics.add(new Metric("vds.idealstate.delete_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.delete_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.merge_bucket.done_ok.rate")); metrics.add(new 
Metric("vds.idealstate.merge_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.merge_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.split_bucket.done_ok.rate")); metrics.add(new Metric("vds.idealstate.split_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.split_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.join_bucket.done_ok.rate")); metrics.add(new Metric("vds.idealstate.join_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.join_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.garbage_collection.done_ok.rate")); metrics.add(new Metric("vds.idealstate.garbage_collection.done_failed.rate")); metrics.add(new Metric("vds.idealstate.garbage_collection.pending.average")); metrics.add(new Metric("vds.distributor.puts.sum.latency.max")); metrics.add(new Metric("vds.distributor.puts.sum.latency.sum")); metrics.add(new Metric("vds.distributor.puts.sum.latency.count")); metrics.add(new Metric("vds.distributor.puts.sum.latency.average")); metrics.add(new Metric("vds.distributor.puts.sum.ok.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.test_and_set_failed")); metrics.add(new Metric("vds.distributor.removes.sum.latency.max")); metrics.add(new Metric("vds.distributor.removes.sum.latency.sum")); metrics.add(new Metric("vds.distributor.removes.sum.latency.count")); metrics.add(new Metric("vds.distributor.removes.sum.latency.average")); metrics.add(new Metric("vds.distributor.removes.sum.ok.rate")); metrics.add(new Metric("vds.distributor.removes.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.removes.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.removes.sum.failures.test_and_set_failed")); metrics.add(new Metric("vds.distributor.updates.sum.latency.max")); 
metrics.add(new Metric("vds.distributor.updates.sum.latency.sum")); metrics.add(new Metric("vds.distributor.updates.sum.latency.count")); metrics.add(new Metric("vds.distributor.updates.sum.latency.average")); metrics.add(new Metric("vds.distributor.updates.sum.ok.rate")); metrics.add(new Metric("vds.distributor.updates.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.updates.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.updates.sum.failures.test_and_set_failed")); metrics.add(new Metric("vds.distributor.updates.sum.diverging_timestamp_updates.rate")); metrics.add(new Metric("vds.distributor.removelocations.sum.ok.rate")); metrics.add(new Metric("vds.distributor.removelocations.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.gets.sum.latency.max")); metrics.add(new Metric("vds.distributor.gets.sum.latency.sum")); metrics.add(new Metric("vds.distributor.gets.sum.latency.count")); metrics.add(new Metric("vds.distributor.gets.sum.latency.average")); metrics.add(new Metric("vds.distributor.gets.sum.ok.rate")); metrics.add(new Metric("vds.distributor.gets.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.gets.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.max")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.sum")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.count")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.average")); metrics.add(new Metric("vds.distributor.visitor.sum.ok.rate")); metrics.add(new Metric("vds.distributor.visitor.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.visitor.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.docsstored.average")); metrics.add(new Metric("vds.distributor.bytesstored.average")); metrics.add(new Metric("vds.bouncer.clock_skew_aborts.count")); return metrics; }
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notfound.rate"));
private static Set<Metric> getStorageMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.add(new Metric("vds.datastored.alldisks.docs.average")); metrics.add(new Metric("vds.datastored.alldisks.bytes.average")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.max")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.sum")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.count")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.average")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.max")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.sum")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.count")); metrics.add(new 
Metric("vds.filestor.alldisks.averagequeuewait.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.max")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.sum")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.count")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.average")); metrics.add(new Metric("vds.visitor.allthreads.completed.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.completed.sum.rate")); metrics.add(new Metric("vds.visitor.allthreads.created.sum.rate")); metrics.add(new Metric("vds.visitor.allthreads.failed.sum.rate")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.failed.rate")); metrics.add(new 
Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.count")); metrics.add(new 
Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.splitbuckets.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.joinbuckets.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.setbucketstates.count.rate")); metrics.add(new Metric("vds.idealstate.buckets_rechecking.average")); metrics.add(new Metric("vds.idealstate.idealstate_diff.average")); metrics.add(new Metric("vds.idealstate.buckets_toofewcopies.average")); metrics.add(new Metric("vds.idealstate.buckets_toomanycopies.average")); metrics.add(new Metric("vds.idealstate.buckets.average")); metrics.add(new Metric("vds.idealstate.buckets_notrusted.average")); metrics.add(new Metric("vds.idealstate.delete_bucket.done_ok.rate")); metrics.add(new Metric("vds.idealstate.delete_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.delete_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.merge_bucket.done_ok.rate")); metrics.add(new 
Metric("vds.idealstate.merge_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.merge_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.split_bucket.done_ok.rate")); metrics.add(new Metric("vds.idealstate.split_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.split_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.join_bucket.done_ok.rate")); metrics.add(new Metric("vds.idealstate.join_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.join_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.garbage_collection.done_ok.rate")); metrics.add(new Metric("vds.idealstate.garbage_collection.done_failed.rate")); metrics.add(new Metric("vds.idealstate.garbage_collection.pending.average")); metrics.add(new Metric("vds.distributor.puts.sum.latency.max")); metrics.add(new Metric("vds.distributor.puts.sum.latency.sum")); metrics.add(new Metric("vds.distributor.puts.sum.latency.count")); metrics.add(new Metric("vds.distributor.puts.sum.latency.average")); metrics.add(new Metric("vds.distributor.puts.sum.ok.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.test_and_set_failed")); metrics.add(new Metric("vds.distributor.removes.sum.latency.max")); metrics.add(new Metric("vds.distributor.removes.sum.latency.sum")); metrics.add(new Metric("vds.distributor.removes.sum.latency.count")); metrics.add(new Metric("vds.distributor.removes.sum.latency.average")); metrics.add(new Metric("vds.distributor.removes.sum.ok.rate")); metrics.add(new Metric("vds.distributor.removes.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.removes.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.removes.sum.failures.test_and_set_failed")); metrics.add(new Metric("vds.distributor.updates.sum.latency.max")); 
metrics.add(new Metric("vds.distributor.updates.sum.latency.sum")); metrics.add(new Metric("vds.distributor.updates.sum.latency.count")); metrics.add(new Metric("vds.distributor.updates.sum.latency.average")); metrics.add(new Metric("vds.distributor.updates.sum.ok.rate")); metrics.add(new Metric("vds.distributor.updates.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.updates.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.updates.sum.failures.test_and_set_failed")); metrics.add(new Metric("vds.distributor.updates.sum.diverging_timestamp_updates.rate")); metrics.add(new Metric("vds.distributor.removelocations.sum.ok.rate")); metrics.add(new Metric("vds.distributor.removelocations.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.gets.sum.latency.max")); metrics.add(new Metric("vds.distributor.gets.sum.latency.sum")); metrics.add(new Metric("vds.distributor.gets.sum.latency.count")); metrics.add(new Metric("vds.distributor.gets.sum.latency.average")); metrics.add(new Metric("vds.distributor.gets.sum.ok.rate")); metrics.add(new Metric("vds.distributor.gets.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.gets.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.max")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.sum")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.count")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.average")); metrics.add(new Metric("vds.distributor.visitor.sum.ok.rate")); metrics.add(new Metric("vds.distributor.visitor.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.docsstored.average")); metrics.add(new Metric("vds.distributor.bytesstored.average")); metrics.add(new Metric("vds.bouncer.clock_skew_aborts.count")); return metrics; }
// VespaMetricSet aggregates the per-service metric name sets into the public
// "vespa" MetricSet (built on top of defaultVespaMetricSet).
// getVespaMetrics() unions all sub-sets and returns an unmodifiable view.
// getSentinelMetrics(): config-sentinel process supervision counters.
// getOtherMetrics(): slobrok, logd and TLS/connection counters for jrt and
// vds.server.network.
// NOTE(review): `singleton` is presumably a static import of
// java.util.Collections.singleton — confirm against the file's import block.
class VespaMetricSet { public static final MetricSet vespaMetricSet = new MetricSet("vespa", getVespaMetrics(), singleton(defaultVespaMetricSet)); private static Set<Metric> getVespaMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.addAll(getSearchNodeMetrics()); metrics.addAll(getStorageMetrics()); metrics.addAll(getDocprocMetrics()); metrics.addAll(getClusterControllerMetrics()); metrics.addAll(getQrserverMetrics()); metrics.addAll(getContainerMetrics()); metrics.addAll(getConfigServerMetrics()); metrics.addAll(getSentinelMetrics()); metrics.addAll(getOtherMetrics()); return Collections.unmodifiableSet(metrics); } private static Set<Metric> getSentinelMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.add(new Metric("sentinel.restarts.count")); metrics.add(new Metric("sentinel.totalRestarts.last")); metrics.add(new Metric("sentinel.uptime.last")); metrics.add(new Metric("sentinel.running.count")); metrics.add(new Metric("sentinel.running.last")); return metrics; } private static Set<Metric> getOtherMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.add(new Metric("slobrok.heartbeats.failed.count")); metrics.add(new Metric("logd.processed.lines.count")); metrics.add(new Metric("jrt.transport.tls-certificate-verification-failures")); metrics.add(new Metric("jrt.transport.peer-authorization-failures")); metrics.add(new Metric("jrt.transport.server.tls-connections-established")); metrics.add(new Metric("jrt.transport.client.tls-connections-established")); metrics.add(new Metric("jrt.transport.server.unencrypted-connections-established")); metrics.add(new Metric("jrt.transport.client.unencrypted-connections-established")); metrics.add(new Metric("vds.server.network.tls-handshakes-failed")); metrics.add(new Metric("vds.server.network.peer-authorization-failures")); metrics.add(new Metric("vds.server.network.client.tls-connections-established")); metrics.add(new Metric("vds.server.network.server.tls-connections-established")); 
// Remainder of getOtherMetrics(), then getConfigServerMetrics() (config server
// request/latency/ZooKeeper gauges) and the start of getContainerMetrics()
// (JDisc request handling and thread-pool gauges; the method continues on the
// next line of the file).
metrics.add(new Metric("vds.server.network.client.insecure-connections-established")); metrics.add(new Metric("vds.server.network.server.insecure-connections-established")); metrics.add(new Metric("vds.server.network.tls-connections-broken")); metrics.add(new Metric("vds.server.network.failed-tls-config-reloads")); metrics.add(new Metric("vds.server.fnet.num-connections")); return metrics; } private static Set<Metric> getConfigServerMetrics() { Set<Metric> metrics =new LinkedHashSet<>(); metrics.add(new Metric("configserver.requests.count")); metrics.add(new Metric("configserver.failedRequests.count")); metrics.add(new Metric("configserver.latency.max")); metrics.add(new Metric("configserver.latency.sum")); metrics.add(new Metric("configserver.latency.count")); metrics.add(new Metric("configserver.latency.average")); metrics.add(new Metric("configserver.cacheConfigElems.last")); metrics.add(new Metric("configserver.cacheChecksumElems.last")); metrics.add(new Metric("configserver.hosts.last")); metrics.add(new Metric("configserver.delayedResponses.count")); metrics.add(new Metric("configserver.sessionChangeErrors.count")); metrics.add(new Metric("configserver.zkZNodes.last")); metrics.add(new Metric("configserver.zkAvgLatency.last")); metrics.add(new Metric("configserver.zkMaxLatency.last")); metrics.add(new Metric("configserver.zkConnections.last")); metrics.add(new Metric("configserver.zkOutstandingRequests.last")); return metrics; } private static Set<Metric> getContainerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.add(new Metric("handled.requests.count")); metrics.add(new Metric("handled.latency.max")); metrics.add(new Metric("handled.latency.sum")); metrics.add(new Metric("handled.latency.count")); metrics.add(new Metric("handled.latency.average")); metrics.add(new Metric("serverRejectedRequests.rate")); metrics.add(new Metric("serverRejectedRequests.count")); metrics.add(new Metric("serverThreadPoolSize.average")); metrics.add(new 
// Continuation of getContainerMetrics(): thread pool / active threads gauges,
// httpapi_* document-API counters, JVM heap, GC and JDisc resource metrics.
Metric("serverThreadPoolSize.min")); metrics.add(new Metric("serverThreadPoolSize.max")); metrics.add(new Metric("serverThreadPoolSize.rate")); metrics.add(new Metric("serverThreadPoolSize.count")); metrics.add(new Metric("serverThreadPoolSize.last")); metrics.add(new Metric("serverActiveThreads.average")); metrics.add(new Metric("serverActiveThreads.min")); metrics.add(new Metric("serverActiveThreads.max")); metrics.add(new Metric("serverActiveThreads.rate")); metrics.add(new Metric("serverActiveThreads.sum")); metrics.add(new Metric("serverActiveThreads.count")); metrics.add(new Metric("serverActiveThreads.last")); metrics.add(new Metric("httpapi_latency.max")); metrics.add(new Metric("httpapi_latency.sum")); metrics.add(new Metric("httpapi_latency.count")); metrics.add(new Metric("httpapi_latency.average")); metrics.add(new Metric("httpapi_pending.max")); metrics.add(new Metric("httpapi_pending.sum")); metrics.add(new Metric("httpapi_pending.count")); metrics.add(new Metric("httpapi_pending.average")); metrics.add(new Metric("httpapi_num_operations.rate")); metrics.add(new Metric("httpapi_num_updates.rate")); metrics.add(new Metric("httpapi_num_removes.rate")); metrics.add(new Metric("httpapi_num_puts.rate")); metrics.add(new Metric("httpapi_succeeded.rate")); metrics.add(new Metric("httpapi_failed.rate")); metrics.add(new Metric("httpapi_parse_error.rate")); metrics.add(new Metric("mem.heap.total.average")); metrics.add(new Metric("mem.heap.free.average")); metrics.add(new Metric("mem.heap.used.average")); metrics.add(new Metric("mem.heap.used.max")); metrics.add(new Metric("jdisc.memory_mappings.max")); metrics.add(new Metric("jdisc.open_file_descriptors.max")); metrics.add(new Metric("jdisc.gc.count.average")); metrics.add(new Metric("jdisc.gc.count.max")); metrics.add(new Metric("jdisc.gc.count.last")); metrics.add(new Metric("jdisc.gc.ms.average")); metrics.add(new Metric("jdisc.gc.ms.max")); metrics.add(new Metric("jdisc.gc.ms.last")); metrics.add(new 
// End of getContainerMetrics() (HTTP status-code rates, request URI/content
// sizes), then getClusterControllerMetrics() (node-state counts, master flag)
// and the start of getDocprocMetrics() (continues on the next line).
Metric("jdisc.deactivated_containers.total.last")); metrics.add(new Metric("jdisc.deactivated_containers.with_retained_refs.last")); metrics.add(new Metric("athenz-tenant-cert.expiry.seconds.last")); metrics.add(new Metric("jdisc.http.request.prematurely_closed.rate")); metrics.add(new Metric("http.status.1xx.rate")); metrics.add(new Metric("http.status.2xx.rate")); metrics.add(new Metric("http.status.3xx.rate")); metrics.add(new Metric("http.status.4xx.rate")); metrics.add(new Metric("http.status.5xx.rate")); metrics.add(new Metric("http.status.401.rate")); metrics.add(new Metric("http.status.403.rate")); metrics.add(new Metric("jdisc.http.request.uri_length.max")); metrics.add(new Metric("jdisc.http.request.uri_length.sum")); metrics.add(new Metric("jdisc.http.request.uri_length.count")); metrics.add(new Metric("jdisc.http.request.uri_length.average")); metrics.add(new Metric("jdisc.http.request.content_size.max")); metrics.add(new Metric("jdisc.http.request.content_size.sum")); metrics.add(new Metric("jdisc.http.request.content_size.count")); metrics.add(new Metric("jdisc.http.request.content_size.average")); return metrics; } private static Set<Metric> getClusterControllerMetrics() { Set<Metric> metrics =new LinkedHashSet<>(); metrics.add(new Metric("cluster-controller.down.count.last")); metrics.add(new Metric("cluster-controller.initializing.count.last")); metrics.add(new Metric("cluster-controller.maintenance.count.last")); metrics.add(new Metric("cluster-controller.retired.count.last")); metrics.add(new Metric("cluster-controller.stopping.count.last")); metrics.add(new Metric("cluster-controller.up.count.last")); metrics.add(new Metric("cluster-controller.cluster-state-change.count")); metrics.add(new Metric("cluster-controller.is-master.last")); metrics.add(new Metric("cluster-controller.node-event.count")); return metrics; } private static Set<Metric> getDocprocMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.add(new 
// End of getDocprocMetrics() (single documents_processed.rate metric), then
// getQrserverMetrics(): query serving metrics — QPS, connections, container and
// backend query latency (incl. 95/99 percentiles), hit counts and coverage.
Metric("documents_processed.rate")); return metrics; } private static Set<Metric> getQrserverMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.add(new Metric("peak_qps.max")); metrics.add(new Metric("search_connections.max")); metrics.add(new Metric("search_connections.sum")); metrics.add(new Metric("search_connections.count")); metrics.add(new Metric("search_connections.average")); metrics.add(new Metric("active_queries.max")); metrics.add(new Metric("active_queries.sum")); metrics.add(new Metric("active_queries.count")); metrics.add(new Metric("active_queries.average")); metrics.add(new Metric("feed.latency.max")); metrics.add(new Metric("feed.latency.sum")); metrics.add(new Metric("feed.latency.count")); metrics.add(new Metric("feed.latency.average")); metrics.add(new Metric("feed.http-requests.count")); metrics.add(new Metric("feed.http-requests.rate")); metrics.add(new Metric("queries.rate")); metrics.add(new Metric("query_container_latency.max")); metrics.add(new Metric("query_container_latency.sum")); metrics.add(new Metric("query_container_latency.count")); metrics.add(new Metric("query_container_latency.average")); metrics.add(new Metric("query_latency.max")); metrics.add(new Metric("query_latency.sum")); metrics.add(new Metric("query_latency.count")); metrics.add(new Metric("query_latency.average")); metrics.add(new Metric("query_latency.95percentile")); metrics.add(new Metric("query_latency.99percentile")); metrics.add(new Metric("failed_queries.rate")); metrics.add(new Metric("degraded_queries.rate")); metrics.add(new Metric("hits_per_query.max")); metrics.add(new Metric("hits_per_query.sum")); metrics.add(new Metric("hits_per_query.count")); metrics.add(new Metric("hits_per_query.average")); metrics.add(new Metric("query_hit_offset.max")); metrics.add(new Metric("query_hit_offset.sum")); metrics.add(new Metric("query_hit_offset.count")); metrics.add(new Metric("documents_covered.count")); metrics.add(new Metric("documents_total.count")); 
// Remainder of getQrserverMetrics(): dispatch rates, total hits, relevance
// score samples and per-category error rates. Then getSearchNodeMetrics()
// (proton/content metrics) begins; that method continues beyond this region.
metrics.add(new Metric("dispatch_internal.rate")); metrics.add(new Metric("dispatch_fdispatch.rate")); metrics.add(new Metric("totalhits_per_query.max")); metrics.add(new Metric("totalhits_per_query.sum")); metrics.add(new Metric("totalhits_per_query.count")); metrics.add(new Metric("totalhits_per_query.average")); metrics.add(new Metric("empty_results.rate")); metrics.add(new Metric("requestsOverQuota.rate")); metrics.add(new Metric("requestsOverQuota.count")); metrics.add(new Metric("relevance.at_1.sum")); metrics.add(new Metric("relevance.at_1.count")); metrics.add(new Metric("relevance.at_1.average")); metrics.add(new Metric("relevance.at_3.sum")); metrics.add(new Metric("relevance.at_3.count")); metrics.add(new Metric("relevance.at_3.average")); metrics.add(new Metric("relevance.at_10.sum")); metrics.add(new Metric("relevance.at_10.count")); metrics.add(new Metric("relevance.at_10.average")); metrics.add(new Metric("error.timeout.rate")); metrics.add(new Metric("error.backends_oos.rate")); metrics.add(new Metric("error.plugin_failure.rate")); metrics.add(new Metric("error.backend_communication_error.rate")); metrics.add(new Metric("error.empty_document_summaries.rate")); metrics.add(new Metric("error.invalid_query_parameter.rate")); metrics.add(new Metric("error.internal_server_error.rate")); metrics.add(new Metric("error.misconfigured_server.rate")); metrics.add(new Metric("error.invalid_query_transformation.rate")); metrics.add(new Metric("error.result_with_errors.rate")); metrics.add(new Metric("error.unspecified.rate")); metrics.add(new Metric("error.unhandled_exception.rate")); return metrics; } private static Set<Metric> getSearchNodeMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.add(new Metric("content.proton.documentdb.documents.total.last")); metrics.add(new Metric("content.proton.documentdb.documents.ready.last")); metrics.add(new Metric("content.proton.documentdb.documents.active.last")); metrics.add(new 
Metric("content.proton.documentdb.documents.removed.last")); metrics.add(new Metric("content.proton.documentdb.index.docs_in_memory.last")); metrics.add(new Metric("content.proton.documentdb.disk_usage.last")); metrics.add(new Metric("content.proton.documentdb.memory_usage.allocated_bytes.max")); metrics.add(new Metric("content.proton.transport.query.count.rate")); metrics.add(new Metric("content.proton.docsum.docs.rate")); metrics.add(new Metric("content.proton.docsum.latency.max")); metrics.add(new Metric("content.proton.docsum.latency.sum")); metrics.add(new Metric("content.proton.docsum.latency.count")); metrics.add(new Metric("content.proton.docsum.latency.average")); metrics.add(new Metric("content.proton.transport.query.latency.max")); metrics.add(new Metric("content.proton.transport.query.latency.sum")); metrics.add(new Metric("content.proton.transport.query.latency.count")); metrics.add(new Metric("content.proton.transport.query.latency.average")); metrics.add(new Metric("content.proton.search_protocol.query.latency.max")); metrics.add(new Metric("content.proton.search_protocol.query.latency.sum")); metrics.add(new Metric("content.proton.search_protocol.query.latency.count")); metrics.add(new Metric("content.proton.search_protocol.query.request_size.max")); metrics.add(new Metric("content.proton.search_protocol.query.request_size.sum")); metrics.add(new Metric("content.proton.search_protocol.query.request_size.count")); metrics.add(new Metric("content.proton.search_protocol.query.reply_size.max")); metrics.add(new Metric("content.proton.search_protocol.query.reply_size.sum")); metrics.add(new Metric("content.proton.search_protocol.query.reply_size.count")); metrics.add(new Metric("content.proton.search_protocol.docsum.latency.max")); metrics.add(new Metric("content.proton.search_protocol.docsum.latency.sum")); metrics.add(new Metric("content.proton.search_protocol.docsum.latency.count")); metrics.add(new 
Metric("content.proton.search_protocol.docsum.request_size.max")); metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.sum")); metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.count")); metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.max")); metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.sum")); metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.count")); metrics.add(new Metric("content.proton.search_protocol.docsum.requested_documents.count")); metrics.add(new Metric("content.proton.executor.proton.maxpending.last")); metrics.add(new Metric("content.proton.executor.proton.accepted.rate")); metrics.add(new Metric("content.proton.executor.flush.maxpending.last")); metrics.add(new Metric("content.proton.executor.flush.accepted.rate")); metrics.add(new Metric("content.proton.executor.match.maxpending.last")); metrics.add(new Metric("content.proton.executor.match.accepted.rate")); metrics.add(new Metric("content.proton.executor.docsum.maxpending.last")); metrics.add(new Metric("content.proton.executor.docsum.accepted.rate")); metrics.add(new Metric("content.proton.executor.shared.maxpending.last")); metrics.add(new Metric("content.proton.executor.shared.accepted.rate")); metrics.add(new Metric("content.proton.executor.warmup.maxpending.last")); metrics.add(new Metric("content.proton.executor.warmup.accepted.rate")); metrics.add(new Metric("content.proton.documentdb.job.total.average")); metrics.add(new Metric("content.proton.documentdb.job.attribute_flush.average")); metrics.add(new Metric("content.proton.documentdb.job.memory_index_flush.average")); metrics.add(new Metric("content.proton.documentdb.job.disk_index_fusion.average")); metrics.add(new Metric("content.proton.documentdb.job.document_store_flush.average")); metrics.add(new Metric("content.proton.documentdb.job.document_store_compact.average")); metrics.add(new 
Metric("content.proton.documentdb.job.bucket_move.average")); metrics.add(new Metric("content.proton.documentdb.job.lid_space_compact.average")); metrics.add(new Metric("content.proton.documentdb.job.removed_documents_prune.average")); metrics.add(new Metric("content.proton.documentdb.threading_service.master.maxpending.last")); metrics.add(new Metric("content.proton.documentdb.threading_service.master.accepted.rate")); metrics.add(new Metric("content.proton.documentdb.threading_service.index.maxpending.last")); metrics.add(new Metric("content.proton.documentdb.threading_service.index.accepted.rate")); metrics.add(new Metric("content.proton.documentdb.threading_service.summary.maxpending.last")); metrics.add(new Metric("content.proton.documentdb.threading_service.summary.accepted.rate")); metrics.add(new Metric("content.proton.documentdb.threading_service.index_field_inverter.maxpending.last")); metrics.add(new Metric("content.proton.documentdb.threading_service.index_field_inverter.accepted.rate")); metrics.add(new Metric("content.proton.documentdb.threading_service.index_field_writer.maxpending.last")); metrics.add(new Metric("content.proton.documentdb.threading_service.index_field_writer.accepted.rate")); metrics.add(new Metric("content.proton.documentdb.threading_service.attribute_field_writer.maxpending.last")); metrics.add(new Metric("content.proton.documentdb.threading_service.attribute_field_writer.accepted.rate")); metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_bloat_factor.average")); metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_bloat_factor.average")); metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_bloat_factor.average")); metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_fragmentation_factor.average")); metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_fragmentation_factor.average")); metrics.add(new 
Metric("content.proton.documentdb.removed.lid_space.lid_fragmentation_factor.average")); metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_limit.last")); metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_limit.last")); metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_limit.last")); metrics.add(new Metric("content.proton.resource_usage.disk.average")); metrics.add(new Metric("content.proton.resource_usage.disk_utilization.average")); metrics.add(new Metric("content.proton.resource_usage.memory.average")); metrics.add(new Metric("content.proton.resource_usage.memory_utilization.average")); metrics.add(new Metric("content.proton.resource_usage.memory_mappings.max")); metrics.add(new Metric("content.proton.resource_usage.open_file_descriptors.max")); metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.enum_store.average")); metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.multi_value.average")); metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.last")); metrics.add(new Metric("content.proton.transactionlog.entries.average")); metrics.add(new Metric("content.proton.transactionlog.disk_usage.average")); metrics.add(new Metric("content.proton.transactionlog.replay_time.last")); metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_usage.average")); metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_bloat.average")); metrics.add(new Metric("content.proton.documentdb.ready.document_store.max_bucket_spread.average")); metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.allocated_bytes.average")); metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.used_bytes.average")); metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.dead_bytes.average")); metrics.add(new 
Metric("content.proton.documentdb.ready.document_store.memory_usage.onhold_bytes.average")); metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_usage.average")); metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_bloat.average")); metrics.add(new Metric("content.proton.documentdb.notready.document_store.max_bucket_spread.average")); metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.allocated_bytes.average")); metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.used_bytes.average")); metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.dead_bytes.average")); metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.onhold_bytes.average")); metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_usage.average")); metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_bloat.average")); metrics.add(new Metric("content.proton.documentdb.removed.document_store.max_bucket_spread.average")); metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.allocated_bytes.average")); metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.used_bytes.average")); metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.dead_bytes.average")); metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.onhold_bytes.average")); metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.memory_usage.average")); metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.hit_rate.average")); metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.lookups.rate")); metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.invalidations.rate")); metrics.add(new 
Metric("content.proton.documentdb.notready.document_store.cache.memory_usage.average")); metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.hit_rate.average")); metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.lookups.rate")); metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.invalidations.rate")); metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.allocated_bytes.average")); metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.used_bytes.average")); metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.dead_bytes.average")); metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.onhold_bytes.average")); metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.allocated_bytes.average")); metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.used_bytes.average")); metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.dead_bytes.average")); metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.onhold_bytes.average")); metrics.add(new Metric("content.proton.documentdb.index.memory_usage.allocated_bytes.average")); metrics.add(new Metric("content.proton.documentdb.index.memory_usage.used_bytes.average")); metrics.add(new Metric("content.proton.documentdb.index.memory_usage.dead_bytes.average")); metrics.add(new Metric("content.proton.documentdb.index.memory_usage.onhold_bytes.average")); metrics.add(new Metric("content.proton.documentdb.matching.queries.rate")); metrics.add(new Metric("content.proton.documentdb.matching.soft_doomed_queries.rate")); metrics.add(new Metric("content.proton.documentdb.matching.query_latency.max")); metrics.add(new Metric("content.proton.documentdb.matching.query_latency.sum")); metrics.add(new 
Metric("content.proton.documentdb.matching.query_latency.count")); metrics.add(new Metric("content.proton.documentdb.matching.query_latency.average")); metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.max")); metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.sum")); metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.count")); metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.average")); metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.max")); metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.sum")); metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.count")); metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.rate")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.queries.rate")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doomed_queries.rate")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.min")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.max")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.sum")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.count")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.max")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.sum")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.count")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.average")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.max")); metrics.add(new 
Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.sum")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.count")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.average")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.max")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.sum")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.count")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.max")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.sum")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.count")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.average")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.rate")); metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.limited_queries.rate")); return metrics; } }
/**
 * Declares the metrics forwarded to the "vespa" consumer. This set is built on top of
 * (and includes) {@code defaultVespaMetricSet}.
 *
 * <p>Each {@code get*Metrics()} method returns the metrics for one service type. Metric
 * names are plain data; the {@link #addMetrics} helper removes the
 * {@code metrics.add(new Metric(...))} boilerplate that was previously repeated once per name.
 * Insertion order is preserved by using {@link LinkedHashSet} throughout.
 */
class VespaMetricSet {

    public static final MetricSet vespaMetricSet = new MetricSet("vespa",
            getVespaMetrics(),
            singleton(defaultVespaMetricSet));

    private static Set<Metric> getVespaMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.addAll(getSearchNodeMetrics());
        // NOTE(review): getStorageMetrics() is referenced but not defined in this chunk —
        // presumably declared elsewhere in this class; confirm.
        metrics.addAll(getStorageMetrics());
        metrics.addAll(getDocprocMetrics());
        metrics.addAll(getClusterControllerMetrics());
        metrics.addAll(getQrserverMetrics());
        metrics.addAll(getContainerMetrics());
        metrics.addAll(getConfigServerMetrics());
        metrics.addAll(getSentinelMetrics());
        metrics.addAll(getOtherMetrics());
        return Collections.unmodifiableSet(metrics);
    }

    /** Adds one {@link Metric} per name to the given set, in the order given. */
    private static void addMetrics(Set<Metric> metrics, String... names) {
        for (String name : names) {
            metrics.add(new Metric(name));
        }
    }

    /** Metrics from the config sentinel (process supervisor). */
    private static Set<Metric> getSentinelMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        addMetrics(metrics,
                "sentinel.restarts.count",
                "sentinel.totalRestarts.last",
                "sentinel.uptime.last",
                "sentinel.running.count",
                "sentinel.running.last");
        return metrics;
    }

    /** Metrics from shared infrastructure: slobrok, logd, and TLS/transport counters. */
    private static Set<Metric> getOtherMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        addMetrics(metrics,
                "slobrok.heartbeats.failed.count",
                "logd.processed.lines.count",
                "jrt.transport.tls-certificate-verification-failures",
                "jrt.transport.peer-authorization-failures",
                "jrt.transport.server.tls-connections-established",
                "jrt.transport.client.tls-connections-established",
                "jrt.transport.server.unencrypted-connections-established",
                "jrt.transport.client.unencrypted-connections-established",
                "vds.server.network.tls-handshakes-failed",
                "vds.server.network.peer-authorization-failures",
                "vds.server.network.client.tls-connections-established",
                "vds.server.network.server.tls-connections-established",
                "vds.server.network.client.insecure-connections-established",
                "vds.server.network.server.insecure-connections-established",
                "vds.server.network.tls-connections-broken",
                "vds.server.network.failed-tls-config-reloads",
                "vds.server.fnet.num-connections");
        return metrics;
    }

    /** Metrics from the config server. */
    private static Set<Metric> getConfigServerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        addMetrics(metrics,
                "configserver.requests.count",
                "configserver.failedRequests.count",
                "configserver.latency.max", "configserver.latency.sum",
                "configserver.latency.count", "configserver.latency.average",
                "configserver.cacheConfigElems.last",
                "configserver.cacheChecksumElems.last",
                "configserver.hosts.last",
                "configserver.delayedResponses.count",
                "configserver.sessionChangeErrors.count",
                "configserver.zkZNodes.last",
                "configserver.zkAvgLatency.last",
                "configserver.zkMaxLatency.last",
                "configserver.zkConnections.last",
                "configserver.zkOutstandingRequests.last");
        return metrics;
    }

    /** Metrics common to all jdisc containers (request handling, JVM, HTTP status codes). */
    private static Set<Metric> getContainerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        addMetrics(metrics,
                "handled.requests.count",
                "handled.latency.max", "handled.latency.sum",
                "handled.latency.count", "handled.latency.average",
                "serverRejectedRequests.rate", "serverRejectedRequests.count",
                "serverThreadPoolSize.average", "serverThreadPoolSize.min", "serverThreadPoolSize.max",
                "serverThreadPoolSize.rate", "serverThreadPoolSize.count", "serverThreadPoolSize.last",
                "serverActiveThreads.average", "serverActiveThreads.min", "serverActiveThreads.max",
                "serverActiveThreads.rate", "serverActiveThreads.sum",
                "serverActiveThreads.count", "serverActiveThreads.last",
                "httpapi_latency.max", "httpapi_latency.sum",
                "httpapi_latency.count", "httpapi_latency.average",
                "httpapi_pending.max", "httpapi_pending.sum",
                "httpapi_pending.count", "httpapi_pending.average",
                "httpapi_num_operations.rate",
                "httpapi_num_updates.rate",
                "httpapi_num_removes.rate",
                "httpapi_num_puts.rate",
                "httpapi_succeeded.rate",
                "httpapi_failed.rate",
                "httpapi_parse_error.rate",
                "mem.heap.total.average",
                "mem.heap.free.average",
                "mem.heap.used.average", "mem.heap.used.max",
                "jdisc.memory_mappings.max",
                "jdisc.open_file_descriptors.max",
                "jdisc.gc.count.average", "jdisc.gc.count.max", "jdisc.gc.count.last",
                "jdisc.gc.ms.average", "jdisc.gc.ms.max", "jdisc.gc.ms.last",
                "jdisc.deactivated_containers.total.last",
                "jdisc.deactivated_containers.with_retained_refs.last",
                "athenz-tenant-cert.expiry.seconds.last",
                "jdisc.http.request.prematurely_closed.rate",
                "http.status.1xx.rate",
                "http.status.2xx.rate",
                "http.status.3xx.rate",
                "http.status.4xx.rate",
                "http.status.5xx.rate",
                "http.status.401.rate",
                "http.status.403.rate",
                "jdisc.http.request.uri_length.max", "jdisc.http.request.uri_length.sum",
                "jdisc.http.request.uri_length.count", "jdisc.http.request.uri_length.average",
                "jdisc.http.request.content_size.max", "jdisc.http.request.content_size.sum",
                "jdisc.http.request.content_size.count", "jdisc.http.request.content_size.average");
        return metrics;
    }

    /** Metrics from the content cluster controller (node state counts and master election). */
    private static Set<Metric> getClusterControllerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        addMetrics(metrics,
                "cluster-controller.down.count.last",
                "cluster-controller.initializing.count.last",
                "cluster-controller.maintenance.count.last",
                "cluster-controller.retired.count.last",
                "cluster-controller.stopping.count.last",
                "cluster-controller.up.count.last",
                "cluster-controller.cluster-state-change.count",
                "cluster-controller.is-master.last",
                "cluster-controller.node-event.count");
        return metrics;
    }

    /** Metrics from the document processing container. */
    private static Set<Metric> getDocprocMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        addMetrics(metrics,
                "documents_processed.rate");
        return metrics;
    }

    /** Metrics from the query/feed serving container (qrserver). */
    private static Set<Metric> getQrserverMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        addMetrics(metrics,
                "peak_qps.max",
                "search_connections.max", "search_connections.sum",
                "search_connections.count", "search_connections.average",
                "active_queries.max", "active_queries.sum",
                "active_queries.count", "active_queries.average",
                "feed.latency.max", "feed.latency.sum",
                "feed.latency.count", "feed.latency.average",
                "feed.http-requests.count", "feed.http-requests.rate",
                "queries.rate",
                "query_container_latency.max", "query_container_latency.sum",
                "query_container_latency.count", "query_container_latency.average",
                "query_latency.max", "query_latency.sum",
                "query_latency.count", "query_latency.average",
                "query_latency.95percentile", "query_latency.99percentile",
                "failed_queries.rate",
                "degraded_queries.rate",
                "hits_per_query.max", "hits_per_query.sum",
                "hits_per_query.count", "hits_per_query.average",
                "query_hit_offset.max", "query_hit_offset.sum", "query_hit_offset.count",
                "documents_covered.count",
                "documents_total.count",
                "dispatch_internal.rate",
                "dispatch_fdispatch.rate",
                "totalhits_per_query.max", "totalhits_per_query.sum",
                "totalhits_per_query.count", "totalhits_per_query.average",
                "empty_results.rate",
                "requestsOverQuota.rate", "requestsOverQuota.count",
                "relevance.at_1.sum", "relevance.at_1.count", "relevance.at_1.average",
                "relevance.at_3.sum", "relevance.at_3.count", "relevance.at_3.average",
                "relevance.at_10.sum", "relevance.at_10.count", "relevance.at_10.average",
                "error.timeout.rate",
                "error.backends_oos.rate",
                "error.plugin_failure.rate",
                "error.backend_communication_error.rate",
                "error.empty_document_summaries.rate",
                "error.invalid_query_parameter.rate",
                "error.internal_server_error.rate",
                "error.misconfigured_server.rate",
                "error.invalid_query_transformation.rate",
                "error.result_with_errors.rate",
                "error.unspecified.rate",
                "error.unhandled_exception.rate");
        return metrics;
    }

    /** Metrics from the search node (proton). */
    private static Set<Metric> getSearchNodeMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        addMetrics(metrics,
                "content.proton.documentdb.documents.total.last",
                "content.proton.documentdb.documents.ready.last",
                "content.proton.documentdb.documents.active.last",
                "content.proton.documentdb.documents.removed.last",
                "content.proton.documentdb.index.docs_in_memory.last",
                "content.proton.documentdb.disk_usage.last",
                "content.proton.documentdb.memory_usage.allocated_bytes.max",
                "content.proton.transport.query.count.rate",
                "content.proton.docsum.docs.rate",
                "content.proton.docsum.latency.max", "content.proton.docsum.latency.sum",
                "content.proton.docsum.latency.count", "content.proton.docsum.latency.average",
                "content.proton.transport.query.latency.max", "content.proton.transport.query.latency.sum",
                "content.proton.transport.query.latency.count", "content.proton.transport.query.latency.average",
                "content.proton.search_protocol.query.latency.max",
                "content.proton.search_protocol.query.latency.sum",
                "content.proton.search_protocol.query.latency.count",
                "content.proton.search_protocol.query.request_size.max",
                "content.proton.search_protocol.query.request_size.sum",
                "content.proton.search_protocol.query.request_size.count",
                "content.proton.search_protocol.query.reply_size.max",
                "content.proton.search_protocol.query.reply_size.sum",
                "content.proton.search_protocol.query.reply_size.count",
                "content.proton.search_protocol.docsum.latency.max",
                "content.proton.search_protocol.docsum.latency.sum",
                "content.proton.search_protocol.docsum.latency.count",
                "content.proton.search_protocol.docsum.request_size.max",
                "content.proton.search_protocol.docsum.request_size.sum",
                "content.proton.search_protocol.docsum.request_size.count",
                "content.proton.search_protocol.docsum.reply_size.max",
                "content.proton.search_protocol.docsum.reply_size.sum",
                "content.proton.search_protocol.docsum.reply_size.count",
                "content.proton.search_protocol.docsum.requested_documents.count",
                "content.proton.executor.proton.maxpending.last", "content.proton.executor.proton.accepted.rate",
                "content.proton.executor.flush.maxpending.last", "content.proton.executor.flush.accepted.rate",
                "content.proton.executor.match.maxpending.last", "content.proton.executor.match.accepted.rate",
                "content.proton.executor.docsum.maxpending.last", "content.proton.executor.docsum.accepted.rate",
                "content.proton.executor.shared.maxpending.last", "content.proton.executor.shared.accepted.rate",
                "content.proton.executor.warmup.maxpending.last", "content.proton.executor.warmup.accepted.rate",
                "content.proton.documentdb.job.total.average",
                "content.proton.documentdb.job.attribute_flush.average",
                "content.proton.documentdb.job.memory_index_flush.average",
                "content.proton.documentdb.job.disk_index_fusion.average",
                "content.proton.documentdb.job.document_store_flush.average",
                "content.proton.documentdb.job.document_store_compact.average",
                "content.proton.documentdb.job.bucket_move.average",
                "content.proton.documentdb.job.lid_space_compact.average",
                "content.proton.documentdb.job.removed_documents_prune.average",
                "content.proton.documentdb.threading_service.master.maxpending.last",
                "content.proton.documentdb.threading_service.master.accepted.rate",
                "content.proton.documentdb.threading_service.index.maxpending.last",
                "content.proton.documentdb.threading_service.index.accepted.rate",
                "content.proton.documentdb.threading_service.summary.maxpending.last",
                "content.proton.documentdb.threading_service.summary.accepted.rate",
                "content.proton.documentdb.threading_service.index_field_inverter.maxpending.last",
                "content.proton.documentdb.threading_service.index_field_inverter.accepted.rate",
                "content.proton.documentdb.threading_service.index_field_writer.maxpending.last",
                "content.proton.documentdb.threading_service.index_field_writer.accepted.rate",
                "content.proton.documentdb.threading_service.attribute_field_writer.maxpending.last",
                "content.proton.documentdb.threading_service.attribute_field_writer.accepted.rate",
                "content.proton.documentdb.ready.lid_space.lid_bloat_factor.average",
                "content.proton.documentdb.notready.lid_space.lid_bloat_factor.average",
                "content.proton.documentdb.removed.lid_space.lid_bloat_factor.average",
                "content.proton.documentdb.ready.lid_space.lid_fragmentation_factor.average",
                "content.proton.documentdb.notready.lid_space.lid_fragmentation_factor.average",
                "content.proton.documentdb.removed.lid_space.lid_fragmentation_factor.average",
                "content.proton.documentdb.ready.lid_space.lid_limit.last",
                "content.proton.documentdb.notready.lid_space.lid_limit.last",
                "content.proton.documentdb.removed.lid_space.lid_limit.last",
                "content.proton.resource_usage.disk.average",
                "content.proton.resource_usage.disk_utilization.average",
                "content.proton.resource_usage.memory.average",
                "content.proton.resource_usage.memory_utilization.average",
                "content.proton.resource_usage.memory_mappings.max",
                "content.proton.resource_usage.open_file_descriptors.max",
                "content.proton.documentdb.attribute.resource_usage.enum_store.average",
                "content.proton.documentdb.attribute.resource_usage.multi_value.average",
                "content.proton.documentdb.attribute.resource_usage.feeding_blocked.last",
                "content.proton.transactionlog.entries.average",
                "content.proton.transactionlog.disk_usage.average",
                "content.proton.transactionlog.replay_time.last",
                "content.proton.documentdb.ready.document_store.disk_usage.average",
                "content.proton.documentdb.ready.document_store.disk_bloat.average",
                "content.proton.documentdb.ready.document_store.max_bucket_spread.average",
                "content.proton.documentdb.ready.document_store.memory_usage.allocated_bytes.average",
                "content.proton.documentdb.ready.document_store.memory_usage.used_bytes.average",
                "content.proton.documentdb.ready.document_store.memory_usage.dead_bytes.average",
                "content.proton.documentdb.ready.document_store.memory_usage.onhold_bytes.average",
                "content.proton.documentdb.notready.document_store.disk_usage.average",
                "content.proton.documentdb.notready.document_store.disk_bloat.average",
                "content.proton.documentdb.notready.document_store.max_bucket_spread.average",
                "content.proton.documentdb.notready.document_store.memory_usage.allocated_bytes.average",
                "content.proton.documentdb.notready.document_store.memory_usage.used_bytes.average",
                "content.proton.documentdb.notready.document_store.memory_usage.dead_bytes.average",
                "content.proton.documentdb.notready.document_store.memory_usage.onhold_bytes.average",
                "content.proton.documentdb.removed.document_store.disk_usage.average",
                "content.proton.documentdb.removed.document_store.disk_bloat.average",
                "content.proton.documentdb.removed.document_store.max_bucket_spread.average",
                "content.proton.documentdb.removed.document_store.memory_usage.allocated_bytes.average",
                "content.proton.documentdb.removed.document_store.memory_usage.used_bytes.average",
                "content.proton.documentdb.removed.document_store.memory_usage.dead_bytes.average",
                "content.proton.documentdb.removed.document_store.memory_usage.onhold_bytes.average",
                "content.proton.documentdb.ready.document_store.cache.memory_usage.average",
                "content.proton.documentdb.ready.document_store.cache.hit_rate.average",
                "content.proton.documentdb.ready.document_store.cache.lookups.rate",
                "content.proton.documentdb.ready.document_store.cache.invalidations.rate",
                "content.proton.documentdb.notready.document_store.cache.memory_usage.average",
                "content.proton.documentdb.notready.document_store.cache.hit_rate.average",
                "content.proton.documentdb.notready.document_store.cache.lookups.rate",
                "content.proton.documentdb.notready.document_store.cache.invalidations.rate",
                "content.proton.documentdb.ready.attribute.memory_usage.allocated_bytes.average",
                "content.proton.documentdb.ready.attribute.memory_usage.used_bytes.average",
                "content.proton.documentdb.ready.attribute.memory_usage.dead_bytes.average",
                "content.proton.documentdb.ready.attribute.memory_usage.onhold_bytes.average",
                "content.proton.documentdb.notready.attribute.memory_usage.allocated_bytes.average",
                "content.proton.documentdb.notready.attribute.memory_usage.used_bytes.average",
                "content.proton.documentdb.notready.attribute.memory_usage.dead_bytes.average",
                "content.proton.documentdb.notready.attribute.memory_usage.onhold_bytes.average",
                "content.proton.documentdb.index.memory_usage.allocated_bytes.average",
                "content.proton.documentdb.index.memory_usage.used_bytes.average",
                "content.proton.documentdb.index.memory_usage.dead_bytes.average",
                "content.proton.documentdb.index.memory_usage.onhold_bytes.average",
                "content.proton.documentdb.matching.queries.rate",
                "content.proton.documentdb.matching.soft_doomed_queries.rate",
                "content.proton.documentdb.matching.query_latency.max",
                "content.proton.documentdb.matching.query_latency.sum",
                "content.proton.documentdb.matching.query_latency.count",
                "content.proton.documentdb.matching.query_latency.average",
                "content.proton.documentdb.matching.query_collateral_time.max",
                "content.proton.documentdb.matching.query_collateral_time.sum",
                "content.proton.documentdb.matching.query_collateral_time.count",
                "content.proton.documentdb.matching.query_collateral_time.average",
                "content.proton.documentdb.matching.query_setup_time.max",
                "content.proton.documentdb.matching.query_setup_time.sum",
                "content.proton.documentdb.matching.query_setup_time.count",
                "content.proton.documentdb.matching.docs_matched.rate",
                "content.proton.documentdb.matching.rank_profile.queries.rate",
                "content.proton.documentdb.matching.rank_profile.soft_doomed_queries.rate",
                "content.proton.documentdb.matching.rank_profile.soft_doom_factor.min",
                "content.proton.documentdb.matching.rank_profile.soft_doom_factor.max",
                "content.proton.documentdb.matching.rank_profile.soft_doom_factor.sum",
                "content.proton.documentdb.matching.rank_profile.soft_doom_factor.count",
                "content.proton.documentdb.matching.rank_profile.query_latency.max",
                "content.proton.documentdb.matching.rank_profile.query_latency.sum",
                "content.proton.documentdb.matching.rank_profile.query_latency.count",
                "content.proton.documentdb.matching.rank_profile.query_latency.average",
                "content.proton.documentdb.matching.rank_profile.query_collateral_time.max",
                "content.proton.documentdb.matching.rank_profile.query_collateral_time.sum",
                "content.proton.documentdb.matching.rank_profile.query_collateral_time.count",
                "content.proton.documentdb.matching.rank_profile.query_collateral_time.average",
                "content.proton.documentdb.matching.rank_profile.query_setup_time.max",
                "content.proton.documentdb.matching.rank_profile.query_setup_time.sum",
                "content.proton.documentdb.matching.rank_profile.query_setup_time.count",
                "content.proton.documentdb.matching.rank_profile.rerank_time.max",
                "content.proton.documentdb.matching.rank_profile.rerank_time.sum",
                "content.proton.documentdb.matching.rank_profile.rerank_time.count",
                "content.proton.documentdb.matching.rank_profile.rerank_time.average",
                "content.proton.documentdb.matching.rank_profile.docs_matched.rate",
                "content.proton.documentdb.matching.rank_profile.limited_queries.rate");
        return metrics;
    }
}
Nit-pick: `Objects.requireNonNull` returns its (non-null) argument, so the null check and the assignment can be folded into one statement: `this.value = Objects.requireNonNull(value);`.
/**
 * Creates a reference to the given file path.
 *
 * @param value the file path to wrap; must not be null
 * @throws NullPointerException if {@code value} is null
 */
public FileReference(String value) {
    // requireNonNull returns its (non-null) argument, so the null check and
    // the assignment combine into one statement.
    this.value = Objects.requireNonNull(value);
}
// Fail fast with a NullPointerException if the caller passed a null value.
Objects.requireNonNull(value);
/**
 * Creates a reference to the given file path.
 *
 * @param value the file path to wrap; must not be null
 * @throws NullPointerException if {@code value} is null
 */
public FileReference(String value) {
    // requireNonNull returns its (non-null) argument, so validation and
    // assignment collapse into a single statement.
    this.value = Objects.requireNonNull(value);
}
class FileReference { private final String value; public String value() { return value; } @Override public int hashCode() { return value.hashCode(); } @Override public boolean equals(Object other) { return other instanceof FileReference && value.equals(((FileReference)other).value); } @Override public String toString() { return "file '" + value + "'"; } public static List<String> toValues(Collection<FileReference> references) { List<String> ret = new ArrayList<String>(); for (FileReference r: references) { ret.add(r.value()); } return ret; } public static Map<String, String> toValueMap(Map<String, FileReference> map) { Map<String, String> ret = new LinkedHashMap<>(); for (Map.Entry<String, FileReference> e : map.entrySet()) { ret.put(e.getKey(), e.getValue().value()); } return ret; } public static FileReference mockFileReferenceForUnitTesting(File file) { if (! file.exists()) throw new IllegalArgumentException("File '" + file.getAbsolutePath() + "' does not exist."); return new FileReference(file.getPath()); } }
// Immutable, typed wrapper around a file path; equality, hashing and toString
// all delegate to the wrapped path string.
// NOTE(review): no constructor is visible in this excerpt, yet
// mockFileReferenceForUnitTesting calls new FileReference(String) — the
// constructor is presumably declared elsewhere; confirm.
class FileReference {

    // The wrapped file path.
    private final String value;

    // Returns the wrapped file path.
    public String value() { return value; }

    @Override public int hashCode() { return value.hashCode(); }

    // Two references are equal iff they wrap the same path string.
    @Override public boolean equals(Object other) { return other instanceof FileReference && value.equals(((FileReference)other).value); }

    @Override public String toString() { return "file '" + value + "'"; }

    // Returns the path of each given reference, in iteration order.
    public static List<String> toValues(Collection<FileReference> references) { List<String> ret = new ArrayList<String>(); for (FileReference r: references) { ret.add(r.value()); } return ret; }

    // Re-keys the given map to plain path strings, preserving insertion order.
    public static Map<String, String> toValueMap(Map<String, FileReference> map) { Map<String, String> ret = new LinkedHashMap<>(); for (Map.Entry<String, FileReference> e : map.entrySet()) { ret.put(e.getKey(), e.getValue().value()); } return ret; }

    // Creates a reference to an existing file; intended for unit tests only.
    // Throws IllegalArgumentException if the file does not exist.
    public static FileReference mockFileReferenceForUnitTesting(File file) { if (! file.exists()) throw new IllegalArgumentException("File '" + file.getAbsolutePath() + "' does not exist."); return new FileReference(file.getPath()); }
}
Replace the hard-coded `/home/y` paths with `environment.pathInNodeUnderVespaHome(...)` so the Vespa home directory is resolved from the environment instead of being assumed.
/**
 * Writes secret-agent check configurations for this node's metrics into the
 * container's yamas-agent folder and restarts yamas-agent so they take effect.
 * Config servers and proxy nodes get additional, role-specific checks.
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> configs = new ArrayList<>();

    // Vespa service metrics, every 60s.
    Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa");
    SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all");
    configs.add(annotatedCheck(node, vespaSchedule));

    // Vespa health metrics. FIX: was also named "vespa", colliding with the check above.
    Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health");
    SecretAgentCheckConfig vespaHealthSchedule =
            new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all");
    configs.add(annotatedCheck(node, vespaHealthSchedule));

    // Host liveness metrics.
    Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life");
    SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath);
    configs.add(annotatedCheck(node, hostLifeSchedule));

    // NTP clock-sync metrics. FIX: was named "host-life", colliding with the previous check.
    Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp");
    SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath);
    configs.add(annotatedCheck(node, ntpSchedule));

    // Coredump-processing metrics, every 5 minutes.
    // FIX: hard-coded "/home/y/..." replaced by a path under the configured Vespa home.
    Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps");
    SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing",
            300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin",
            "129600", "--crit", "1", "--coredir",
            environment.pathInNodeUnderVespaHome("var/crash/processing").toString());
    configs.add(annotatedCheck(node, coredumpSchedule));

    if (node.getNodeType() == NodeType.config) {
        // Config server metrics.
        Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2");
        SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver",
                60, configServerCheckPath, "-zero", "configserver");
        configs.add(annotatedCheck(node, configServerSchedule));

        // ZooKeeper backup age. FIX: hard-coded "/home/y/..." replaced by environment lookup.
        Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage",
                300, zkbackupCheckPath,
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(),
                "-m", "150", "-a", "config-zkbackupage");
        configs.add(annotatedCheck(node, zkbackupSchedule));
    }

    if (node.getNodeType() == NodeType.proxy) {
        // Routing-config freshness. FIX: path was a relative "home/y/..." (missing leading
        // slash) — replaced by a path resolved under the configured Vespa home.
        Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage",
                60, routingAgeCheckPath,
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(),
                "-m", "90", "-a", "routing-configage");
        configs.add(annotatedCheck(node, routingAgeSchedule));

        // SSL endpoint status. FIX: was named "zkbackupage", colliding with the config-server check.
        Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status");
        SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status",
                300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30");
        configs.add(annotatedCheck(node, sslSchedule));
    }

    // Write all check configs into the container and restart yamas-agent to pick them up.
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder)));
    final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
"129600", "--crit", "1", "--coredir", "/home/y/var/crash/processing");
/**
 * Writes secret-agent check configurations for this node's metrics into the
 * container's yamas-agent folder and restarts yamas-agent so they take effect.
 * The set of checks depends on the node type (config server, proxy, other).
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> configs = new ArrayList<>();

    // Host liveness metrics, every 60s on all node types.
    Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life");
    SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath);
    configs.add(annotatedCheck(node, hostLifeSchedule));

    // NTP clock-sync metrics.
    Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp");
    SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath);
    configs.add(annotatedCheck(node, ntpSchedule));

    // Coredump-processing metrics, every 5 minutes; all paths resolved under Vespa home.
    Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps");
    SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1", "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString());
    configs.add(annotatedCheck(node, coredumpSchedule));

    // Vespa service/health checks run on every node type except config servers.
    if (node.getNodeType() != NodeType.config) {
        Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health");
        SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all");
        configs.add(annotatedCheck(node, vespaHealthSchedule));
        Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa");
        SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all");
        configs.add(annotatedCheck(node, vespaSchedule));
    }

    // Config servers additionally monitor the config server itself and ZK backup age.
    if (node.getNodeType() == NodeType.config) {
        Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2");
        SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath, "-zero", "configserver");
        configs.add(annotatedCheck(node, configServerSchedule));
        Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(), "-m", "150", "-a", "config-zkbackupage");
        configs.add(annotatedCheck(node, zkbackupSchedule));
    }

    // Proxy nodes monitor routing-config freshness and the SSL endpoint.
    if (node.getNodeType() == NodeType.proxy) {
        Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(), "-m", "90", "-a", "routing-configage");
        configs.add(annotatedCheck(node, routingAgeSchedule));
        Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status");
        SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30");
        configs.add(annotatedCheck(node, sslSchedule));
    }

    // Write all configs into the container's yamas-agent folder and restart the agent.
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder)));
    final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
// Performs storage-related maintenance for node containers: writing monitoring and
// filebeat configs, deleting old logs, reporting/archiving coredumps, and cleaning
// container storage. Maintenance is throttled per container via MaintenanceThrottler.
class StorageMaintainer {
    // Pseudo container name used for node-admin's own maintenance bookkeeping.
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    // Serializes maintenance job descriptions to JSON for the maintainer JVM.
    private static final ObjectMapper objectMapper = new ObjectMapper();

    // Gauge: number of processed coredumps present on the host.
    private final GaugeWrapper numberOfCoredumpsOnHost;
    // Counter: maintainer command invocations that exited non-zero.
    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    // Per-container throttling state; ConcurrentHashMap since entries may be
    // accessed concurrently (presumably one agent thread per container — confirm).
    private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;
        // Register maintenance metrics under the "docker" role dimension.
        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
        numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps");
    }

    // Tags the given check config with node identity (flavor, zone, owner, cluster,
    // Vespa version) so emitted metrics can be attributed; returns the same instance.
    private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; }

    // Generates and writes the filebeat config for the node into the container.
    // Best-effort: any failure (including missing config) is logged, never thrown.
    public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(node); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode( containerName, Paths.get("/etc/filebeat/filebeat.yml")); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + node, t); } }

    // Returns the disk usage in bytes of the container's /home directory,
    // or empty if the measurement failed (failure is logged).
    public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/")); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } }

    // Measures disk usage under path (in bytes) by running "du -xsk" with a
    // 60-second timeout; returns 0 if the path does not exist.
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) { return 0; } final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; }

    // Deletes old log files (vespa, nginx, logstash, etc.) for the container;
    // throttled to at most once per throttling window.
    public void removeOldFilesFromNode(ContainerName containerName) { if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); }

    // Queues delete-files jobs for the container's log and filedistribution
    // directories: rotated logs > 3 days, query access logs > 3 days,
    // log archive and filedistribution files > 31 days.
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { Path[] pathsToClean = { environment.pathInNodeUnderVespaHome("logs/elasticsearch2"), environment.pathInNodeUnderVespaHome("logs/logstash2"), environment.pathInNodeUnderVespaHome("logs/daemontools_y"), environment.pathInNodeUnderVespaHome("logs/nginx"), environment.pathInNodeUnderVespaHome("logs/vespa") }; for (Path pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); }

    // Checks whether the container has new coredumps and, if so, reports and
    // archives them. Also samples the coredump gauge. Throttled unless force is set.
    // @param force set to true to bypass throttling
    public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) { IOExceptionUtil.uncheck(() -> numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count())); if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); }

    // Queues a handle-core-dumps job annotated with node identity attributes.
    // No-op if no coredump feed endpoint is configured.
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) { if (!environment.getCoredumpFeedEndpoint().isPresent()) { return; } Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get()) .withArgument("attributes", attributes); }

    // Cleans node-admin's own storage: archived app data older than 7 days,
    // Vespa logs and filedistribution files older than 31 days. Throttled.
    public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); }

    // Prepares container storage for the next container: removes old files,
    // reports coredumps and archives container data; runs when the container
    // enters state "dirty". Resets the container's throttler afterwards.
    public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); }

    // Queues jobs that delete the container's "var" directory and move the
    // remaining container files to the node-admin cleanup area.
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/"))) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); }

    // Runs node-maintainer's SpecVerifier against the node's expected hardware
    // spec and returns its output (the new combined hardware divergence).
    // Throws RuntimeException if the maintainer exits non-zero.
    public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--ips", String.join(",", node.getIpAddresses()))); node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0])); }

    // Runs the maintenance script as root with the given main class and args,
    // returning its trimmed output; counts and throws on non-zero exit.
    private String executeMaintainer(String mainClass, String... args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } }

    // Queues maintenance jobs and sends them as one JSON request to the
    // maintenance JVM (node-admin-maintenance).
    private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } }

    // A single maintenance job: a type plus named arguments, serialized to JSON.
    // Path arguments are stringified since Path has no stable JSON form.
    private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? value.toString() : value); return this; } }

    // Returns (creating on first use) the throttler for the given container.
    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); }

    // Tracks the earliest next-allowed time for each maintenance task:
    // old-file removal at most hourly, coredump handling at most every 5 minutes.
    private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } }
}
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final GaugeWrapper numberOfCoredumpsOnHost; private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps"); } private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." 
+ owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; } public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(node); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode( containerName, Paths.get("/etc/filebeat/filebeat.yml")); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + node, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/")); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) { return 0; } final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new 
InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { Path[] pathsToClean = { environment.pathInNodeUnderVespaHome("logs/elasticsearch2"), environment.pathInNodeUnderVespaHome("logs/logstash2"), environment.pathInNodeUnderVespaHome("logs/daemontools_y"), environment.pathInNodeUnderVespaHome("logs/nginx"), environment.pathInNodeUnderVespaHome("logs/vespa") }; for (Path pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, 
environment.pathInNodeUnderVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) { try { numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count()); } catch (IOException e) { } if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) { if (!environment.getCoredumpFeedEndpoint().isPresent()) { return; } Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); 
node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); 
} /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/"))) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param node Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--ips", String.join(",", node.getIpAddresses()))); node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", 
arguments.toArray(new String[0])); } private String executeMaintainer(String mainClass, String... args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
`yms_check_vespa` and `yms_check_host_life` do not work for config server
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) { List<SecretAgentCheckConfig> configs = new ArrayList<>(); Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"); SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all"); configs.add(annotatedCheck(node, vespaSchedule)); Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"); SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa", 60, vespaHealthCheckPath, "all"); configs.add(annotatedCheck(node, vespaHealthSchedule)); Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"); SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath); configs.add(annotatedCheck(node, hostLifeSchedule)); Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"); SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("host-life", 60, ntpCheckPath); configs.add(annotatedCheck(node, ntpSchedule)); Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"); SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1", "--coredir", "/home/y/var/crash/processing"); configs.add(annotatedCheck(node, coredumpSchedule)); if (node.getNodeType() == NodeType.config) { Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"); SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath, "-zero", "configserver"); configs.add(annotatedCheck(node, configServerSchedule)); Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); 
SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath, "-f", "/home/y/var/vespa-hosted/zkbackup.stat", "-m", "150", "-a", "config-zkbackupage"); configs.add(annotatedCheck(node, zkbackupSchedule)); } if (node.getNodeType() == NodeType.proxy) { Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath, "-f", "home/y/var/vespa-hosted/routing/nginx.conf", "-m", "90", "-a", "routing-configage"); configs.add(annotatedCheck(node, routingAgeSchedule)); Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"); SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("zkbackupage", 300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30"); configs.add(annotatedCheck(node, sslSchedule)); } Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/")); configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder))); final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"}; dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent); }
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) { List<SecretAgentCheckConfig> configs = new ArrayList<>(); Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"); SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath); configs.add(annotatedCheck(node, hostLifeSchedule)); Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"); SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath); configs.add(annotatedCheck(node, ntpSchedule)); Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"); SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1", "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString()); configs.add(annotatedCheck(node, coredumpSchedule)); if (node.getNodeType() != NodeType.config) { Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"); SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all"); configs.add(annotatedCheck(node, vespaHealthSchedule)); Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"); SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all"); configs.add(annotatedCheck(node, vespaSchedule)); } if (node.getNodeType() == NodeType.config) { Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"); SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath, "-zero", "configserver"); configs.add(annotatedCheck(node, configServerSchedule)); Path 
zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(), "-m", "150", "-a", "config-zkbackupage"); configs.add(annotatedCheck(node, zkbackupSchedule)); } if (node.getNodeType() == NodeType.proxy) { Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(), "-m", "90", "-a", "routing-configage"); configs.add(annotatedCheck(node, routingAgeSchedule)); Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"); SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30"); configs.add(annotatedCheck(node, sslSchedule)); } Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/")); configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder))); final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"}; dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent); }
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final GaugeWrapper numberOfCoredumpsOnHost; private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps"); } private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." 
+ owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; } public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(node); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode( containerName, Paths.get("/etc/filebeat/filebeat.yml")); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + node, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/")); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) { return 0; } final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new 
InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { Path[] pathsToClean = { environment.pathInNodeUnderVespaHome("logs/elasticsearch2"), environment.pathInNodeUnderVespaHome("logs/logstash2"), environment.pathInNodeUnderVespaHome("logs/daemontools_y"), environment.pathInNodeUnderVespaHome("logs/nginx"), environment.pathInNodeUnderVespaHome("logs/vespa") }; for (Path pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, 
environment.pathInNodeUnderVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) { IOExceptionUtil.uncheck(() -> numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count())); if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) { if (!environment.getCoredumpFeedEndpoint().isPresent()) { return; } Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); 
node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); 
} /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/"))) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param node Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--ips", String.join(",", node.getIpAddresses()))); node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", 
arguments.toArray(new String[0])); } private String executeMaintainer(String mainClass, String... args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
/**
 * Performs storage-related maintenance for Docker containers and for node-admin itself:
 * writes filebeat config, measures disk usage, deletes old log files, reports/archives
 * coredumps and archives container data. Heavy work is queued up and delegated to a
 * separate maintenance JVM via {@link MaintainerExecutor}.
 */
class StorageMaintainer {
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    private static final ObjectMapper objectMapper = new ObjectMapper();

    private final GaugeWrapper numberOfCoredumpsOnHost;
    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    // Per-container throttling state; ConcurrentHashMap because multiple node agents run concurrently.
    private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
        numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps");
    }

    /** Tags a secret-agent check with node metadata (flavor, zone, owner, membership, vespa version). */
    private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) {
        check.withTag("namespace", "Vespa")
                .withTag("role", "tenants")
                .withTag("flavor", node.getFlavor())
                .withTag("canonicalFlavor", node.getCanonicalFlavor())
                .withTag("state", node.getState().toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());
        node.getOwner().ifPresent(owner -> check
                .withTag("tenantName", owner.getTenant())
                .withTag("app", owner.getApplication() + "." + owner.getInstance())
                .withTag("applicationName", owner.getApplication())
                .withTag("instanceName", owner.getInstance())
                .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance()));
        node.getMembership().ifPresent(membership -> check
                .withTag("clustertype", membership.getClusterType())
                .withTag("clusterid", membership.getClusterId()));
        node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version));
        return check;
    }

    /** Generates and writes the filebeat config for the node; best-effort — any failure is logged, not thrown. */
    public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(node);
            if (!config.isPresent()) {
                logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString());
                return;
            }
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(
                    containerName, Paths.get("/etc/filebeat/filebeat.yml"));
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } catch (Throwable t) {
            logger.error("Failed writing filebeat config; " + node, t);
        }
    }

    /** Returns the disk usage of the container's /home in bytes, or empty on any failure (logged). */
    public Optional<Long> getDiskUsageFor(ContainerName containerName) {
        Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/"));
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
            logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Runs `du -xsk` on the given path with a 60 second timeout and converts the result
     * (reported in KiB) to bytes. Returns 0 if the path does not exist.
     *
     * @throws RuntimeException if du times out or its output is malformed
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        if (!Files.exists(path)) {
            return 0;
        }

        final String[] command = {"du", "-xsk", path.toString()};

        Process duCommand = new ProcessBuilder().command(command).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }
        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }

        // du -k reports KiB; convert to bytes.
        long diskUsageKB = Long.valueOf(results[0]);
        return diskUsageKB * 1024;
    }

    /**
     * Deletes old log files for vespa, nginx, logstash, etc.
     * Throttled to at most once per hour per container.
     */
    public void removeOldFilesFromNode(ContainerName containerName) {
        if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
    }

    /** Queues deletion jobs for rotated logs (3 days), query access logs (3 days), log archive and filedistribution (31 days). */
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        Path[] pathsToClean = {
                environment.pathInNodeUnderVespaHome("logs/elasticsearch2"),
                environment.pathInNodeUnderVespaHome("logs/logstash2"),
                environment.pathInNodeUnderVespaHome("logs/daemontools_y"),
                environment.pathInNodeUnderVespaHome("logs/nginx"),
                environment.pathInNodeUnderVespaHome("logs/vespa")
        };

        for (Path pathToClean : pathsToClean) {
            Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
            if (Files.exists(path)) {
                maintainerExecutor.addJob("delete-files")
                        .withArgument("basePath", path)
                        .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                        .withArgument("fileNameRegex", ".*\\.log.+")
                        .withArgument("recursive", false);
            }
        }

        Path qrsDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", qrsDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                .withArgument("recursive", false);

        Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", logArchiveDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
    }

    /**
     * Checks if container has any new coredumps, reports and archives them if so.
     * Throttled to at most once per 5 minutes per container unless {@code force} is set.
     *
     * @param force Set to true to bypass throttling
     */
    public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) {
        // Best-effort sampling of the processed-coredump count metric.
        // NOTE(review): the IOException is silently swallowed — consider at least logging it.
        // NOTE(review): the Stream returned by Files.list() is never closed, leaking a
        // directory handle; it should be wrapped in try-with-resources — TODO confirm/fix.
        try {
            numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count());
        } catch (IOException e) { }

        if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
    }

    /** Queues a coredump-processing job; no-op when no coredump feed endpoint is configured. */
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) {
        if (!environment.getCoredumpFeedEndpoint().isPresent()) {
            // No endpoint to feed the processed coredumps to — nothing to do.
            return;
        }

        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", node.getHostname());
        attributes.put("parent_hostname", environment.getParentHostHostname());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", node.getFlavor());
        attributes.put("kernel_version", System.getProperty("os.version"));

        node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString()));
        node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version));
        node.getOwner().ifPresent(owner -> {
            attributes.put("tenant", owner.getTenant());
            attributes.put("application", owner.getApplication());
            attributes.put("instance", owner.getInstance());
        });

        maintainerExecutor.addJob("handle-core-dumps")
                .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
                .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var/crash")))
                .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get())
                .withArgument("attributes", attributes);
    }

    /**
     * Deletes old
     *  * archived app data
     *  * Vespa logs
     *  * Filedistribution files
     * Throttled to at most once per hour.
     */
    public void cleanNodeAdmin() {
        if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        maintainerExecutor.addJob("delete-directories")
                .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
                .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
                .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));

        Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", nodeAdminJDiskLogsPath)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
     * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty"
     */
    public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) {
        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        addArchiveNodeData(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).reset();
    }

    /** Queues deletion of the container's var/ and archival of the remaining data to the cleanup area. */
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        maintainerExecutor.addJob("recursive-delete")
                .withArgument("path", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var")));

        maintainerExecutor.addJob("move-files")
                .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/")))
                .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    }

    /**
     * Runs node-maintainer's SpecVerifier and returns its output
     * @param node Node specification containing the expected values we want to verify against
     * @return new combined hardware divergence
     * @throws RuntimeException if exit code != 0
     */
    public String getHardwareDivergence(NodeSpec node) {
        List<String> arguments = new ArrayList<>(Arrays.asList("specification",
                "--disk", Double.toString(node.getMinDiskAvailableGb()),
                "--memory", Double.toString(node.getMinMainMemoryAvailableGb()),
                "--cpu_cores", Double.toString(node.getMinCpuCores()),
                "--is_ssd", Boolean.toString(node.isFastDisk()),
                "--ips", String.join(",", node.getIpAddresses())));

        node.getHardwareDivergence().ifPresent(hardwareDivergence -> {
            arguments.add("--divergence");
            arguments.add(hardwareDivergence);
        });

        return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0]));
    }

    /**
     * Invokes the maintenance JVM (maintenance.sh) under sudo with the given main class and
     * arguments, returning trimmed stdout/stderr.
     *
     * @throws RuntimeException on non-zero exit code or if the process cannot be started
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo",
                        "VESPA_HOME=" + getDefaults().vespaHome(),
                        getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"),
                        mainClass),
                Stream.of(args))
                .toArray(String[]::new);

        try {
            Pair<Integer, String> result = processExecuter.exec(command);
            if (result.getFirst() != 0) {
                numberOfNodeAdminMaintenanceFails.add();
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }

    /**
     * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
     */
    private class MaintainerExecutor {
        private final List<MaintainerExecutorJob> jobs = new ArrayList<>();

        MaintainerExecutorJob addJob(String jobName) {
            MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
            jobs.add(job);
            return job;
        }

        void execute() {
            String args;
            try {
                args = objectMapper.writeValueAsString(jobs);
            } catch (JsonProcessingException e) {
                // NOTE(review): the cause `e` is dropped here — it should be passed to the
                // RuntimeException constructor to preserve the stack trace.
                throw new RuntimeException("Failed transform list of maintenance jobs to JSON");
            }

            executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
        }
    }

    /** A single maintenance job, serialized to JSON (type + named arguments) for the maintenance JVM. */
    private class MaintainerExecutorJob {
        @JsonProperty(value="type")
        private final String type;

        @JsonProperty(value="arguments")
        private final Map<String, Object> arguments = new HashMap<>();

        MaintainerExecutorJob(String type) {
            this.type = type;
        }

        MaintainerExecutorJob withArgument(String argument, Object value) {
            // Paths are serialized as strings since Path is not directly JSON-serializable here.
            arguments.put(argument, (value instanceof Path) ? value.toString() : value);
            return this;
        }
    }

    // NOTE(review): could be simplified to maintenanceThrottlerByContainerName.computeIfAbsent(...).
    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
        maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler());
        return maintenanceThrottlerByContainerName.get(containerName);
    }

    /** Tracks the earliest next-run times for the throttled maintenance operations of one container. */
    private class MaintenanceThrottler {
        private Instant nextRemoveOldFilesAt = Instant.EPOCH;
        private Instant nextHandleOldCoredumpsAt = Instant.EPOCH;

        void updateNextRemoveOldFilesTime() {
            nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
        }

        boolean shouldRemoveOldFilesNow() {
            return !nextRemoveOldFilesAt.isAfter(clock.instant());
        }

        void updateNextHandleCoredumpsTime() {
            nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
        }

        boolean shouldHandleCoredumpsNow() {
            return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
        }

        void reset() {
            nextRemoveOldFilesAt = Instant.EPOCH;
            nextHandleOldCoredumpsAt = Instant.EPOCH;
        }
    }
}
Replace hardcoded `/home/y` paths with `environment.pathInNodeUnderVespaHome()` so that check-script and monitored-file paths respect the configured Vespa home.
/**
 * Writes secret-agent (yamas) metric check configs for the given container and restarts
 * the agent so the new checks take effect. Config-server and routing-proxy nodes get
 * extra node-type-specific checks.
 * <p>
 * All check-script and monitored-file paths are resolved via
 * {@code environment.pathInNodeUnderVespaHome()} instead of hardcoding {@code /home/y}.
 *
 * @param containerName container to write the check configs for
 * @param node          node specification used to annotate every check with node metadata
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> configs = new ArrayList<>();

    Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa");
    SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all");
    configs.add(annotatedCheck(node, vespaSchedule));

    Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health");
    // Fixed copy-paste bug: this check was previously registered under the name "vespa".
    SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all");
    configs.add(annotatedCheck(node, vespaHealthSchedule));

    Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life");
    SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath);
    configs.add(annotatedCheck(node, hostLifeSchedule));

    Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp");
    // Fixed copy-paste bug: this check was previously registered under the name "host-life".
    SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath);
    configs.add(annotatedCheck(node, ntpSchedule));

    Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps");
    SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath,
            "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1",
            // Was hardcoded to "/home/y/var/crash/processing".
            "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString());
    configs.add(annotatedCheck(node, coredumpSchedule));

    if (node.getNodeType() == NodeType.config) {
        Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2");
        SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath,
                "-zero", "configserver");
        configs.add(annotatedCheck(node, configServerSchedule));

        Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath,
                // Was hardcoded to "/home/y/var/vespa-hosted/zkbackup.stat".
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(),
                "-m", "150", "-a", "config-zkbackupage");
        configs.add(annotatedCheck(node, zkbackupSchedule));
    }

    if (node.getNodeType() == NodeType.proxy) {
        Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath,
                // Was hardcoded AND relative ("home/y/..." — missing the leading slash).
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(),
                "-m", "90", "-a", "routing-configage");
        configs.add(annotatedCheck(node, routingAgeSchedule));

        Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status");
        // Fixed copy-paste bug: this check was previously registered under the name "zkbackupage".
        SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300, sslCheckPath,
                "-e", "localhost", "-p", "4443", "-t", "30");
        configs.add(annotatedCheck(node, sslSchedule));
    }

    // Write all check configs, then restart the agent so it picks them up.
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder)));

    final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
zkbackupCheckPath, "-f", "/home/y/var/vespa-hosted/zkbackup.stat", "-m", "150", "-a", "config-zkbackupage");
/**
 * Writes secret-agent (yamas) metric check configs for the given container and restarts
 * the agent so the new checks take effect. Common checks (host-life, ntp, coredump
 * processing) are written for every node type; vespa checks are skipped on config
 * servers, which instead get configserver/zookeeper-backup checks; routing proxies
 * additionally get routing-config-age and ssl-status checks.
 *
 * @param containerName container to write the check configs for
 * @param node          node specification used to annotate every check with node metadata
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> configs = new ArrayList<>();

    Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life");
    SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath);
    configs.add(annotatedCheck(node, hostLifeSchedule));

    Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp");
    SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath);
    configs.add(annotatedCheck(node, ntpSchedule));

    Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps");
    SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath,
            "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1",
            "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString());
    configs.add(annotatedCheck(node, coredumpSchedule));

    // Vespa checks are not relevant on config servers.
    if (node.getNodeType() != NodeType.config) {
        Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health");
        SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all");
        configs.add(annotatedCheck(node, vespaHealthSchedule));

        Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa");
        SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all");
        configs.add(annotatedCheck(node, vespaSchedule));
    }

    if (node.getNodeType() == NodeType.config) {
        Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2");
        SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath,
                "-zero", "configserver");
        configs.add(annotatedCheck(node, configServerSchedule));

        Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath,
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(),
                "-m", "150", "-a", "config-zkbackupage");
        configs.add(annotatedCheck(node, zkbackupSchedule));
    }

    if (node.getNodeType() == NodeType.proxy) {
        Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath,
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(),
                "-m", "90", "-a", "routing-configage");
        configs.add(annotatedCheck(node, routingAgeSchedule));

        Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status");
        SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300, sslCheckPath,
                "-e", "localhost", "-p", "4443", "-t", "30");
        configs.add(annotatedCheck(node, sslSchedule));
    }

    // Write all check configs, then restart the agent so it picks them up.
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder)));

    final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
/**
 * Performs storage-related maintenance for Docker containers and for node-admin itself:
 * writes filebeat config, measures disk usage, deletes old log files, reports/archives
 * coredumps and archives container data. Heavy work is queued up and delegated to a
 * separate maintenance JVM via {@link MaintainerExecutor}.
 */
class StorageMaintainer {
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    private static final ObjectMapper objectMapper = new ObjectMapper();

    private final GaugeWrapper numberOfCoredumpsOnHost;
    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    // Per-container throttling state; ConcurrentHashMap because multiple node agents run concurrently.
    private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
        numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps");
    }

    /** Tags a secret-agent check with node metadata (flavor, zone, owner, membership, vespa version). */
    private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) {
        check.withTag("namespace", "Vespa")
                .withTag("role", "tenants")
                .withTag("flavor", node.getFlavor())
                .withTag("canonicalFlavor", node.getCanonicalFlavor())
                .withTag("state", node.getState().toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());
        node.getOwner().ifPresent(owner -> check
                .withTag("tenantName", owner.getTenant())
                .withTag("app", owner.getApplication() + "." + owner.getInstance())
                .withTag("applicationName", owner.getApplication())
                .withTag("instanceName", owner.getInstance())
                .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance()));
        node.getMembership().ifPresent(membership -> check
                .withTag("clustertype", membership.getClusterType())
                .withTag("clusterid", membership.getClusterId()));
        node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version));
        return check;
    }

    /** Generates and writes the filebeat config for the node; best-effort — any failure is logged, not thrown. */
    public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(node);
            if (!config.isPresent()) {
                logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString());
                return;
            }
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(
                    containerName, Paths.get("/etc/filebeat/filebeat.yml"));
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } catch (Throwable t) {
            logger.error("Failed writing filebeat config; " + node, t);
        }
    }

    /** Returns the disk usage of the container's /home in bytes, or empty on any failure (logged). */
    public Optional<Long> getDiskUsageFor(ContainerName containerName) {
        Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/"));
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
            logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Runs `du -xsk` on the given path with a 60 second timeout and converts the result
     * (reported in KiB) to bytes. Returns 0 if the path does not exist.
     *
     * @throws RuntimeException if du times out or its output is malformed
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        if (!Files.exists(path)) {
            return 0;
        }

        final String[] command = {"du", "-xsk", path.toString()};

        Process duCommand = new ProcessBuilder().command(command).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }
        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }

        // du -k reports KiB; convert to bytes.
        long diskUsageKB = Long.valueOf(results[0]);
        return diskUsageKB * 1024;
    }

    /**
     * Deletes old log files for vespa, nginx, logstash, etc.
     * Throttled to at most once per hour per container.
     */
    public void removeOldFilesFromNode(ContainerName containerName) {
        if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
    }

    /** Queues deletion jobs for rotated logs (3 days), query access logs (3 days), log archive and filedistribution (31 days). */
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        Path[] pathsToClean = {
                environment.pathInNodeUnderVespaHome("logs/elasticsearch2"),
                environment.pathInNodeUnderVespaHome("logs/logstash2"),
                environment.pathInNodeUnderVespaHome("logs/daemontools_y"),
                environment.pathInNodeUnderVespaHome("logs/nginx"),
                environment.pathInNodeUnderVespaHome("logs/vespa")
        };

        for (Path pathToClean : pathsToClean) {
            Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
            if (Files.exists(path)) {
                maintainerExecutor.addJob("delete-files")
                        .withArgument("basePath", path)
                        .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                        .withArgument("fileNameRegex", ".*\\.log.+")
                        .withArgument("recursive", false);
            }
        }

        Path qrsDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", qrsDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                .withArgument("recursive", false);

        Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", logArchiveDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
    }

    /**
     * Checks if container has any new coredumps, reports and archives them if so.
     * Throttled to at most once per 5 minutes per container unless {@code force} is set.
     *
     * @param force Set to true to bypass throttling
     */
    public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) {
        // Sample the processed-coredump count metric; an IOException is rethrown unchecked.
        // NOTE(review): the Stream returned by Files.list() is never closed, leaking a
        // directory handle; it should be wrapped in try-with-resources — TODO confirm/fix.
        IOExceptionUtil.uncheck(() ->
                numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count()));

        if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
    }

    /** Queues a coredump-processing job; no-op when no coredump feed endpoint is configured. */
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) {
        if (!environment.getCoredumpFeedEndpoint().isPresent()) {
            // No endpoint to feed the processed coredumps to — nothing to do.
            return;
        }

        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", node.getHostname());
        attributes.put("parent_hostname", environment.getParentHostHostname());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", node.getFlavor());
        attributes.put("kernel_version", System.getProperty("os.version"));

        node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString()));
        node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version));
        node.getOwner().ifPresent(owner -> {
            attributes.put("tenant", owner.getTenant());
            attributes.put("application", owner.getApplication());
            attributes.put("instance", owner.getInstance());
        });

        maintainerExecutor.addJob("handle-core-dumps")
                .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
                .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var/crash")))
                .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get())
                .withArgument("attributes", attributes);
    }

    /**
     * Deletes old
     *  * archived app data
     *  * Vespa logs
     *  * Filedistribution files
     * Throttled to at most once per hour.
     */
    public void cleanNodeAdmin() {
        if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        maintainerExecutor.addJob("delete-directories")
                .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
                .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
                .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));

        Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", nodeAdminJDiskLogsPath)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
     * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty"
     */
    public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) {
        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        addArchiveNodeData(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).reset();
    }

    /** Queues deletion of the container's var/ and archival of the remaining data to the cleanup area. */
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        maintainerExecutor.addJob("recursive-delete")
                .withArgument("path", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var")));

        maintainerExecutor.addJob("move-files")
                .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/")))
                .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    }

    /**
     * Runs node-maintainer's SpecVerifier and returns its output
     * @param node Node specification containing the expected values we want to verify against
     * @return new combined hardware divergence
     * @throws RuntimeException if exit code != 0
     */
    public String getHardwareDivergence(NodeSpec node) {
        List<String> arguments = new ArrayList<>(Arrays.asList("specification",
                "--disk", Double.toString(node.getMinDiskAvailableGb()),
                "--memory", Double.toString(node.getMinMainMemoryAvailableGb()),
                "--cpu_cores", Double.toString(node.getMinCpuCores()),
                "--is_ssd", Boolean.toString(node.isFastDisk()),
                "--ips", String.join(",", node.getIpAddresses())));

        node.getHardwareDivergence().ifPresent(hardwareDivergence -> {
            arguments.add("--divergence");
            arguments.add(hardwareDivergence);
        });

        return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0]));
    }

    /**
     * Invokes the maintenance JVM (maintenance.sh) under sudo with the given main class and
     * arguments, returning trimmed stdout/stderr.
     *
     * @throws RuntimeException on non-zero exit code or if the process cannot be started
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo",
                        "VESPA_HOME=" + getDefaults().vespaHome(),
                        getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"),
                        mainClass),
                Stream.of(args))
                .toArray(String[]::new);

        try {
            Pair<Integer, String> result = processExecuter.exec(command);
            if (result.getFirst() != 0) {
                numberOfNodeAdminMaintenanceFails.add();
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }

    /**
     * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
     */
    private class MaintainerExecutor {
        private final List<MaintainerExecutorJob> jobs = new ArrayList<>();

        MaintainerExecutorJob addJob(String jobName) {
            MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
            jobs.add(job);
            return job;
        }

        void execute() {
            String args;
            try {
                args = objectMapper.writeValueAsString(jobs);
            } catch (JsonProcessingException e) {
                // NOTE(review): the cause `e` is dropped here — it should be passed to the
                // RuntimeException constructor to preserve the stack trace.
                throw new RuntimeException("Failed transform list of maintenance jobs to JSON");
            }

            executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
        }
    }

    /** A single maintenance job, serialized to JSON (type + named arguments) for the maintenance JVM. */
    private class MaintainerExecutorJob {
        @JsonProperty(value="type")
        private final String type;

        @JsonProperty(value="arguments")
        private final Map<String, Object> arguments = new HashMap<>();

        MaintainerExecutorJob(String type) {
            this.type = type;
        }

        MaintainerExecutorJob withArgument(String argument, Object value) {
            // Paths are serialized as strings since Path is not directly JSON-serializable here.
            arguments.put(argument, (value instanceof Path) ? value.toString() : value);
            return this;
        }
    }

    // NOTE(review): could be simplified to maintenanceThrottlerByContainerName.computeIfAbsent(...).
    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
        maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler());
        return maintenanceThrottlerByContainerName.get(containerName);
    }

    /** Tracks the earliest next-run times for the throttled maintenance operations of one container. */
    private class MaintenanceThrottler {
        private Instant nextRemoveOldFilesAt = Instant.EPOCH;
        private Instant nextHandleOldCoredumpsAt = Instant.EPOCH;

        void updateNextRemoveOldFilesTime() {
            nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
        }

        boolean shouldRemoveOldFilesNow() {
            return !nextRemoveOldFilesAt.isAfter(clock.instant());
        }

        void updateNextHandleCoredumpsTime() {
            nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
        }

        boolean shouldHandleCoredumpsNow() {
            return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
        }

        void reset() {
            nextRemoveOldFilesAt = Instant.EPOCH;
            nextHandleOldCoredumpsAt = Instant.EPOCH;
        }
    }
}
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final GaugeWrapper numberOfCoredumpsOnHost; private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps"); } private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." 
+ owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; } public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(node); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode( containerName, Paths.get("/etc/filebeat/filebeat.yml")); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + node, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/")); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) { return 0; } final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new 
InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { Path[] pathsToClean = { environment.pathInNodeUnderVespaHome("logs/elasticsearch2"), environment.pathInNodeUnderVespaHome("logs/logstash2"), environment.pathInNodeUnderVespaHome("logs/daemontools_y"), environment.pathInNodeUnderVespaHome("logs/nginx"), environment.pathInNodeUnderVespaHome("logs/vespa") }; for (Path pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, 
environment.pathInNodeUnderVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) { try { numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count()); } catch (IOException e) { } if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) { if (!environment.getCoredumpFeedEndpoint().isPresent()) { return; } Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); 
node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); 
} /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/"))) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param node Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--ips", String.join(",", node.getIpAddresses()))); node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", 
arguments.toArray(new String[0])); } private String executeMaintainer(String mainClass, String... args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
`/home/y` -> `environment.pathInNodeUnderVespaHome()`
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) { List<SecretAgentCheckConfig> configs = new ArrayList<>(); Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"); SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all"); configs.add(annotatedCheck(node, vespaSchedule)); Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"); SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa", 60, vespaHealthCheckPath, "all"); configs.add(annotatedCheck(node, vespaHealthSchedule)); Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"); SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath); configs.add(annotatedCheck(node, hostLifeSchedule)); Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"); SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("host-life", 60, ntpCheckPath); configs.add(annotatedCheck(node, ntpSchedule)); Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"); SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1", "--coredir", "/home/y/var/crash/processing"); configs.add(annotatedCheck(node, coredumpSchedule)); if (node.getNodeType() == NodeType.config) { Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"); SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath, "-zero", "configserver"); configs.add(annotatedCheck(node, configServerSchedule)); Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); 
SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath, "-f", "/home/y/var/vespa-hosted/zkbackup.stat", "-m", "150", "-a", "config-zkbackupage"); configs.add(annotatedCheck(node, zkbackupSchedule)); } if (node.getNodeType() == NodeType.proxy) { Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath, "-f", "home/y/var/vespa-hosted/routing/nginx.conf", "-m", "90", "-a", "routing-configage"); configs.add(annotatedCheck(node, routingAgeSchedule)); Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"); SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("zkbackupage", 300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30"); configs.add(annotatedCheck(node, sslSchedule)); } Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/")); configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder))); final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"}; dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent); }
routingAgeCheckPath, "-f", "home/y/var/vespa-hosted/routing/nginx.conf", "-m", "90", "-a", "routing-configage");
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) { List<SecretAgentCheckConfig> configs = new ArrayList<>(); Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"); SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath); configs.add(annotatedCheck(node, hostLifeSchedule)); Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"); SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath); configs.add(annotatedCheck(node, ntpSchedule)); Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"); SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1", "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString()); configs.add(annotatedCheck(node, coredumpSchedule)); if (node.getNodeType() != NodeType.config) { Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"); SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all"); configs.add(annotatedCheck(node, vespaHealthSchedule)); Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"); SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all"); configs.add(annotatedCheck(node, vespaSchedule)); } if (node.getNodeType() == NodeType.config) { Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"); SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath, "-zero", "configserver"); configs.add(annotatedCheck(node, configServerSchedule)); Path 
zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(), "-m", "150", "-a", "config-zkbackupage"); configs.add(annotatedCheck(node, zkbackupSchedule)); } if (node.getNodeType() == NodeType.proxy) { Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(), "-m", "90", "-a", "routing-configage"); configs.add(annotatedCheck(node, routingAgeSchedule)); Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"); SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30"); configs.add(annotatedCheck(node, sslSchedule)); } Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/")); configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder))); final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"}; dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent); }
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final GaugeWrapper numberOfCoredumpsOnHost; private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps"); } private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." 
+ owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; } public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(node); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode( containerName, Paths.get("/etc/filebeat/filebeat.yml")); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + node, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/")); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) { return 0; } final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new 
InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { Path[] pathsToClean = { environment.pathInNodeUnderVespaHome("logs/elasticsearch2"), environment.pathInNodeUnderVespaHome("logs/logstash2"), environment.pathInNodeUnderVespaHome("logs/daemontools_y"), environment.pathInNodeUnderVespaHome("logs/nginx"), environment.pathInNodeUnderVespaHome("logs/vespa") }; for (Path pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, 
environment.pathInNodeUnderVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) { IOExceptionUtil.uncheck(() -> numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count())); if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) { if (!environment.getCoredumpFeedEndpoint().isPresent()) { return; } Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); 
node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); 
} /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/"))) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param node Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--ips", String.join(",", node.getIpAddresses()))); node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", 
arguments.toArray(new String[0])); } private String executeMaintainer(String mainClass, String... args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final GaugeWrapper numberOfCoredumpsOnHost; private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps"); } private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." 
+ owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; } public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(node); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode( containerName, Paths.get("/etc/filebeat/filebeat.yml")); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + node, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/")); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) { return 0; } final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new 
InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { Path[] pathsToClean = { environment.pathInNodeUnderVespaHome("logs/elasticsearch2"), environment.pathInNodeUnderVespaHome("logs/logstash2"), environment.pathInNodeUnderVespaHome("logs/daemontools_y"), environment.pathInNodeUnderVespaHome("logs/nginx"), environment.pathInNodeUnderVespaHome("logs/vespa") }; for (Path pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, 
environment.pathInNodeUnderVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) { try { numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count()); } catch (IOException e) { } if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) { if (!environment.getCoredumpFeedEndpoint().isPresent()) { return; } Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); 
node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); 
} /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/"))) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param node Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--ips", String.join(",", node.getIpAddresses()))); node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", 
arguments.toArray(new String[0])); } private String executeMaintainer(String mainClass, String... args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
`host-life` -> `ntp`
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) { List<SecretAgentCheckConfig> configs = new ArrayList<>(); Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"); SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all"); configs.add(annotatedCheck(node, vespaSchedule)); Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"); SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa", 60, vespaHealthCheckPath, "all"); configs.add(annotatedCheck(node, vespaHealthSchedule)); Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"); SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath); configs.add(annotatedCheck(node, hostLifeSchedule)); Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"); SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("host-life", 60, ntpCheckPath); configs.add(annotatedCheck(node, ntpSchedule)); Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"); SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1", "--coredir", "/home/y/var/crash/processing"); configs.add(annotatedCheck(node, coredumpSchedule)); if (node.getNodeType() == NodeType.config) { Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"); SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath, "-zero", "configserver"); configs.add(annotatedCheck(node, configServerSchedule)); Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); 
SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath, "-f", "/home/y/var/vespa-hosted/zkbackup.stat", "-m", "150", "-a", "config-zkbackupage"); configs.add(annotatedCheck(node, zkbackupSchedule)); } if (node.getNodeType() == NodeType.proxy) { Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath, "-f", "home/y/var/vespa-hosted/routing/nginx.conf", "-m", "90", "-a", "routing-configage"); configs.add(annotatedCheck(node, routingAgeSchedule)); Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"); SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("zkbackupage", 300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30"); configs.add(annotatedCheck(node, sslSchedule)); } Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/")); configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder))); final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"}; dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent); }
SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("host-life", 60, ntpCheckPath);
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) { List<SecretAgentCheckConfig> configs = new ArrayList<>(); Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"); SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath); configs.add(annotatedCheck(node, hostLifeSchedule)); Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"); SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath); configs.add(annotatedCheck(node, ntpSchedule)); Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"); SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1", "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString()); configs.add(annotatedCheck(node, coredumpSchedule)); if (node.getNodeType() != NodeType.config) { Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"); SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all"); configs.add(annotatedCheck(node, vespaHealthSchedule)); Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"); SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all"); configs.add(annotatedCheck(node, vespaSchedule)); } if (node.getNodeType() == NodeType.config) { Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"); SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath, "-zero", "configserver"); configs.add(annotatedCheck(node, configServerSchedule)); Path 
zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(), "-m", "150", "-a", "config-zkbackupage"); configs.add(annotatedCheck(node, zkbackupSchedule)); } if (node.getNodeType() == NodeType.proxy) { Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(), "-m", "90", "-a", "routing-configage"); configs.add(annotatedCheck(node, routingAgeSchedule)); Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"); SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30"); configs.add(annotatedCheck(node, sslSchedule)); } Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/")); configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder))); final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"}; dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent); }
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final GaugeWrapper numberOfCoredumpsOnHost; private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps"); } private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." 
+ owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; } public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(node); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode( containerName, Paths.get("/etc/filebeat/filebeat.yml")); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + node, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/")); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) { return 0; } final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new 
InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { Path[] pathsToClean = { environment.pathInNodeUnderVespaHome("logs/elasticsearch2"), environment.pathInNodeUnderVespaHome("logs/logstash2"), environment.pathInNodeUnderVespaHome("logs/daemontools_y"), environment.pathInNodeUnderVespaHome("logs/nginx"), environment.pathInNodeUnderVespaHome("logs/vespa") }; for (Path pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, 
environment.pathInNodeUnderVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) { IOExceptionUtil.uncheck(() -> numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count())); if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) { if (!environment.getCoredumpFeedEndpoint().isPresent()) { return; } Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); 
node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); 
} /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/"))) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param node Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--ips", String.join(",", node.getIpAddresses()))); node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", 
arguments.toArray(new String[0])); } private String executeMaintainer(String mainClass, String... args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final GaugeWrapper numberOfCoredumpsOnHost; private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps"); } private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." 
+ owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; } public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(node); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode( containerName, Paths.get("/etc/filebeat/filebeat.yml")); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + node, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/")); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) { return 0; } final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new 
InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { Path[] pathsToClean = { environment.pathInNodeUnderVespaHome("logs/elasticsearch2"), environment.pathInNodeUnderVespaHome("logs/logstash2"), environment.pathInNodeUnderVespaHome("logs/daemontools_y"), environment.pathInNodeUnderVespaHome("logs/nginx"), environment.pathInNodeUnderVespaHome("logs/vespa") }; for (Path pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, 
environment.pathInNodeUnderVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) { try { numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count()); } catch (IOException e) { } if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) { if (!environment.getCoredumpFeedEndpoint().isPresent()) { return; } Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); 
node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); 
} /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/"))) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param node Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--ips", String.join(",", node.getIpAddresses()))); node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", 
arguments.toArray(new String[0])); } private String executeMaintainer(String mainClass, String... args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
The check is ran in the container right?
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) { List<SecretAgentCheckConfig> configs = new ArrayList<>(); Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"); SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all"); configs.add(annotatedCheck(node, vespaSchedule)); Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"); SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa", 60, vespaHealthCheckPath, "all"); configs.add(annotatedCheck(node, vespaHealthSchedule)); Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"); SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath); configs.add(annotatedCheck(node, hostLifeSchedule)); Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"); SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("host-life", 60, ntpCheckPath); configs.add(annotatedCheck(node, ntpSchedule)); Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"); SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1", "--coredir", "/home/y/var/crash/processing"); configs.add(annotatedCheck(node, coredumpSchedule)); if (node.getNodeType() == NodeType.config) { Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"); SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath, "-zero", "configserver"); configs.add(annotatedCheck(node, configServerSchedule)); Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); 
SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath, "-f", "/home/y/var/vespa-hosted/zkbackup.stat", "-m", "150", "-a", "config-zkbackupage"); configs.add(annotatedCheck(node, zkbackupSchedule)); } if (node.getNodeType() == NodeType.proxy) { Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath, "-f", "home/y/var/vespa-hosted/routing/nginx.conf", "-m", "90", "-a", "routing-configage"); configs.add(annotatedCheck(node, routingAgeSchedule)); Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"); SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("zkbackupage", 300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30"); configs.add(annotatedCheck(node, sslSchedule)); } Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/")); configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder))); final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"}; dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent); }
"129600", "--crit", "1", "--coredir", "/home/y/var/crash/processing");
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) { List<SecretAgentCheckConfig> configs = new ArrayList<>(); Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"); SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath); configs.add(annotatedCheck(node, hostLifeSchedule)); Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"); SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath); configs.add(annotatedCheck(node, ntpSchedule)); Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"); SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1", "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString()); configs.add(annotatedCheck(node, coredumpSchedule)); if (node.getNodeType() != NodeType.config) { Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"); SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all"); configs.add(annotatedCheck(node, vespaHealthSchedule)); Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"); SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all"); configs.add(annotatedCheck(node, vespaSchedule)); } if (node.getNodeType() == NodeType.config) { Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"); SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath, "-zero", "configserver"); configs.add(annotatedCheck(node, configServerSchedule)); Path 
zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(), "-m", "150", "-a", "config-zkbackupage"); configs.add(annotatedCheck(node, zkbackupSchedule)); } if (node.getNodeType() == NodeType.proxy) { Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(), "-m", "90", "-a", "routing-configage"); configs.add(annotatedCheck(node, routingAgeSchedule)); Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"); SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30"); configs.add(annotatedCheck(node, sslSchedule)); } Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/")); configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder))); final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"}; dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent); }
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final GaugeWrapper numberOfCoredumpsOnHost; private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps"); } private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." 
+ owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; } public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(node); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode( containerName, Paths.get("/etc/filebeat/filebeat.yml")); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + node, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/")); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) { return 0; } final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new 
InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { Path[] pathsToClean = { environment.pathInNodeUnderVespaHome("logs/elasticsearch2"), environment.pathInNodeUnderVespaHome("logs/logstash2"), environment.pathInNodeUnderVespaHome("logs/daemontools_y"), environment.pathInNodeUnderVespaHome("logs/nginx"), environment.pathInNodeUnderVespaHome("logs/vespa") }; for (Path pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, 
environment.pathInNodeUnderVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) { IOExceptionUtil.uncheck(() -> numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count())); if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) { if (!environment.getCoredumpFeedEndpoint().isPresent()) { return; } Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); 
node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); 
} /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/"))) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param node Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--ips", String.join(",", node.getIpAddresses()))); node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", 
arguments.toArray(new String[0])); } private String executeMaintainer(String mainClass, String... args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final GaugeWrapper numberOfCoredumpsOnHost; private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps"); } private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." 
+ owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; } public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(node); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode( containerName, Paths.get("/etc/filebeat/filebeat.yml")); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + node, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/")); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) { return 0; } final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new 
InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { Path[] pathsToClean = { environment.pathInNodeUnderVespaHome("logs/elasticsearch2"), environment.pathInNodeUnderVespaHome("logs/logstash2"), environment.pathInNodeUnderVespaHome("logs/daemontools_y"), environment.pathInNodeUnderVespaHome("logs/nginx"), environment.pathInNodeUnderVespaHome("logs/vespa") }; for (Path pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, 
environment.pathInNodeUnderVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) { try { numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count()); } catch (IOException e) { } if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) { if (!environment.getCoredumpFeedEndpoint().isPresent()) { return; } Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); 
node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); 
} /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/"))) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param node Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--ips", String.join(",", node.getIpAddresses()))); node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", 
arguments.toArray(new String[0])); } private String executeMaintainer(String mainClass, String... args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
Do you know why host_life fails on the configserver? (Likely cause, to verify: in the method below the ntp check is constructed with the duplicate name "host-life", and every check is written into /etc/yamas-agent/ — so the ntp config presumably overwrites the real host-life check's config file. The ssl check likewise reuses the name "zkbackupage".)
/**
 * Writes one secret-agent check config per metric check into the container's
 * /etc/yamas-agent/ directory and restarts the yamas agent to pick them up.
 *
 * IMPORTANT: a check's name doubles as its config file name under
 * /etc/yamas-agent/, so every check MUST have a unique name — a duplicate
 * silently overwrites the earlier check's config. (The previous version
 * registered the ntp check as "host-life" and the ssl check as "zkbackupage",
 * which clobbered the real host-life/zkbackupage configs; that is why
 * host_life failed on the configserver.)
 *
 * @param containerName container whose agent config directory is written to
 * @param node          node spec used to annotate each check with tags
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> configs = new ArrayList<>();

    // Host-level liveness check.
    Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life");
    SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath);
    configs.add(annotatedCheck(node, hostLifeSchedule));

    // Clock synchronization check — unique name "ntp" (was wrongly "host-life").
    Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp");
    SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath);
    configs.add(annotatedCheck(node, ntpSchedule));

    // Coredump processing check; the crash directory is derived from the
    // configured Vespa home instead of hard-coding /home/y.
    Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps");
    SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300,
            coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600",
            "--crit", "1", "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString());
    configs.add(annotatedCheck(node, coredumpSchedule));

    if (node.getNodeType() != NodeType.config) {
        // Vespa metric/health checks only make sense on nodes running Vespa services,
        // not on the config server itself.
        Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health");
        SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60,
                vespaHealthCheckPath, "all");
        configs.add(annotatedCheck(node, vespaHealthSchedule));

        Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa");
        SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all");
        configs.add(annotatedCheck(node, vespaSchedule));
    }

    if (node.getNodeType() == NodeType.config) {
        // Config-server specific checks.
        Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2");
        SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60,
                configServerCheckPath, "-zero", "configserver");
        configs.add(annotatedCheck(node, configServerSchedule));

        // ZooKeeper backup freshness check; stat file path resolved under Vespa home.
        Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300,
                zkbackupCheckPath,
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(),
                "-m", "150", "-a", "config-zkbackupage");
        configs.add(annotatedCheck(node, zkbackupSchedule));
    }

    if (node.getNodeType() == NodeType.proxy) {
        // Routing config freshness check; the previous path was both hard-coded
        // and missing its leading slash ("home/y/...").
        Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60,
                routingAgeCheckPath,
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(),
                "-m", "90", "-a", "routing-configage");
        configs.add(annotatedCheck(node, routingAgeSchedule));

        // TLS endpoint check — unique name "ssl-status" (was wrongly "zkbackupage").
        Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status");
        SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300,
                sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30");
        configs.add(annotatedCheck(node, sslSchedule));
    }

    // Write one config file per check (named after the check) and restart the
    // agent so the new configs take effect.
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder)));
    final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
/**
 * Writes one secret-agent check config per metric check into the container's
 * /etc/yamas-agent/ directory and restarts the yamas agent to pick them up.
 *
 * Note: each check's name is used when writing its config (see writeTo below),
 * so check names must be unique within this method.
 *
 * @param containerName container whose agent config directory is written to
 * @param node          node spec used to annotate each check with tags
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> configs = new ArrayList<>();

    // Host-level liveness check.
    Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life");
    SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath);
    configs.add(annotatedCheck(node, hostLifeSchedule));

    // Clock synchronization check.
    Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp");
    SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath);
    configs.add(annotatedCheck(node, ntpSchedule));

    // Coredump processing check; crash directory resolved under the configured Vespa home.
    Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps");
    SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300,
            coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600",
            "--crit", "1", "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString());
    configs.add(annotatedCheck(node, coredumpSchedule));

    if (node.getNodeType() != NodeType.config) {
        // Vespa metric/health checks are skipped on the config server.
        Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health");
        SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all");
        configs.add(annotatedCheck(node, vespaHealthSchedule));

        Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa");
        SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all");
        configs.add(annotatedCheck(node, vespaSchedule));
    }

    if (node.getNodeType() == NodeType.config) {
        // Config-server specific checks.
        Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2");
        SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60,
                configServerCheckPath, "-zero", "configserver");
        configs.add(annotatedCheck(node, configServerSchedule));

        // ZooKeeper backup freshness check.
        Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300,
                zkbackupCheckPath,
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(),
                "-m", "150", "-a", "config-zkbackupage");
        configs.add(annotatedCheck(node, zkbackupSchedule));
    }

    if (node.getNodeType() == NodeType.proxy) {
        // Routing config freshness check (proxy nodes only).
        Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60,
                routingAgeCheckPath,
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(),
                "-m", "90", "-a", "routing-configage");
        configs.add(annotatedCheck(node, routingAgeSchedule));

        // TLS endpoint check against the local routing layer.
        Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status");
        SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300,
                sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30");
        configs.add(annotatedCheck(node, sslSchedule));
    }

    // Write one config file per check and restart the agent so they take effect.
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder)));
    final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
/**
 * Maintains container storage on a Docker host: writes monitoring/filebeat
 * configs, measures disk usage, prunes old logs, processes coredumps, and
 * archives/cleans node data by delegating jobs to an external maintenance JVM.
 */
class StorageMaintainer {
    // Pseudo container name used for maintenance of the node-admin host itself.
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    private static final ObjectMapper objectMapper = new ObjectMapper();

    private final GaugeWrapper numberOfCoredumpsOnHost;
    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    // Per-container throttle state; concurrent map since agents for different
    // containers may call in concurrently.
    private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
                             MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
        numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps");
    }

    /**
     * Decorates a check config with the standard set of node/owner/membership
     * tags and returns the same (mutated) instance for convenience.
     */
    private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) {
        check.withTag("namespace", "Vespa")
                .withTag("role", "tenants")
                .withTag("flavor", node.getFlavor())
                .withTag("canonicalFlavor", node.getCanonicalFlavor())
                .withTag("state", node.getState().toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());
        node.getOwner().ifPresent(owner -> check
                .withTag("tenantName", owner.getTenant())
                .withTag("app", owner.getApplication() + "." + owner.getInstance())
                .withTag("applicationName", owner.getApplication())
                .withTag("instanceName", owner.getInstance())
                .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance()));
        node.getMembership().ifPresent(membership -> check
                .withTag("clustertype", membership.getClusterType())
                .withTag("clusterid", membership.getClusterId()));
        node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version));

        return check;
    }

    /**
     * Generates and writes the filebeat config into the container's
     * /etc/filebeat/filebeat.yml. Best-effort: any failure is logged and
     * swallowed so node maintenance continues.
     */
    public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(node);
            if (!config.isPresent()) {
                logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString());
                return;
            }
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/filebeat/filebeat.yml"));
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } catch (Throwable t) {
            logger.error("Failed writing filebeat config; " + node, t);
        }
    }

    /**
     * Returns the disk usage (bytes) of the container's /home directory, or
     * empty if the measurement failed (failure is logged).
     */
    public Optional<Long> getDiskUsageFor(ContainerName containerName) {
        Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/"));
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
            logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Measures disk usage under {@code path} by shelling out to `du -xsk`
     * (single filesystem, summarized, KiB units); returns bytes, or 0 if the
     * path does not exist.
     *
     * @throws RuntimeException if du times out (60s) or produces unexpected output
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        if (!Files.exists(path)) {
            return 0;
        }
        final String[] command = {"du", "-xsk", path.toString()};

        Process duCommand = new ProcessBuilder().command(command).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }
        // du output is "<kilobytes>\t<path>".
        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }

        long diskUsageKB = Long.valueOf(results[0]);
        return diskUsageKB * 1024;
    }

    /** Deletes old log files (vespa, nginx, logstash, etc.) inside the container; throttled to once per interval. */
    public void removeOldFilesFromNode(ContainerName containerName) {
        if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
    }

    /** Queues deletion jobs for the container's aged log and filedistribution files. */
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        Path[] pathsToClean = {
                environment.pathInNodeUnderVespaHome("logs/elasticsearch2"),
                environment.pathInNodeUnderVespaHome("logs/logstash2"),
                environment.pathInNodeUnderVespaHome("logs/daemontools_y"),
                environment.pathInNodeUnderVespaHome("logs/nginx"),
                environment.pathInNodeUnderVespaHome("logs/vespa")
        };

        // Rotated log files (*.log.<suffix>) older than 3 days.
        for (Path pathToClean : pathsToClean) {
            Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
            if (Files.exists(path)) {
                maintainerExecutor.addJob("delete-files")
                        .withArgument("basePath", path)
                        .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                        .withArgument("fileNameRegex", ".*\\.log.+")
                        .withArgument("recursive", false);
            }
        }

        // Query access logs older than 3 days.
        Path qrsDir = environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", qrsDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                .withArgument("recursive", false);

        // Archived logs older than 31 days.
        Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", logArchiveDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        // Filedistribution files older than 31 days.
        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
    }

    /**
     * Checks if container has any new coredumps, reports and archives them if so.
     * Also samples the count of processed coredumps into the host gauge.
     *
     * @param force Set to true to bypass throttling
     */
    public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) {
        // Update the coredump gauge regardless of throttling.
        IOExceptionUtil.uncheck(() -> numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count()));

        if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
    }

    /**
     * Queues a handle-core-dumps job with metadata attributes for the feed;
     * no-op when no coredump feed endpoint is configured.
     */
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) {
        if (!environment.getCoredumpFeedEndpoint().isPresent()) {
            // Core dump handling is disabled.
            return;
        }

        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", node.getHostname());
        attributes.put("parent_hostname", environment.getParentHostHostname());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", node.getFlavor());
        attributes.put("kernel_version", System.getProperty("os.version"));

        node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString()));
        node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version));
        node.getOwner().ifPresent(owner -> {
            attributes.put("tenant", owner.getTenant());
            attributes.put("application", owner.getApplication());
            attributes.put("instance", owner.getInstance());
        });

        maintainerExecutor.addJob("handle-core-dumps")
                .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
                .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("var/crash")))
                .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get())
                .withArgument("attributes", attributes);
    }

    /**
     * Deletes old:
     *  - archived app data
     *  - Vespa logs
     *  - Filedistribution files
     * for the node-admin host itself; throttled to once per interval.
     */
    public void cleanNodeAdmin() {
        if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        maintainerExecutor.addJob("delete-directories")
                .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
                .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
                .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));

        Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", nodeAdminJDiskLogsPath)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
     * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty".
     */
    public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) {
        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        addArchiveNodeData(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        // Reset throttling so the next container starts with a clean slate.
        getMaintenanceThrottlerFor(containerName).reset();
    }

    /** Queues jobs that delete the container's var/ data and move the rest to the cleanup archive. */
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        maintainerExecutor.addJob("recursive-delete")
                .withArgument("path", environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("var")));

        maintainerExecutor.addJob("move-files")
                .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/")))
                .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    }

    /**
     * Runs node-maintainer's SpecVerifier and returns its output
     * @param node Node specification containing the expected values we want to verify against
     * @return new combined hardware divergence
     * @throws RuntimeException if exit code != 0
     */
    public String getHardwareDivergence(NodeSpec node) {
        List<String> arguments = new ArrayList<>(Arrays.asList("specification",
                "--disk", Double.toString(node.getMinDiskAvailableGb()),
                "--memory", Double.toString(node.getMinMainMemoryAvailableGb()),
                "--cpu_cores", Double.toString(node.getMinCpuCores()),
                "--is_ssd", Boolean.toString(node.isFastDisk()),
                "--ips", String.join(",", node.getIpAddresses())));

        // Feed the previously recorded divergence back in so it can be combined.
        node.getHardwareDivergence().ifPresent(hardwareDivergence -> {
            arguments.add("--divergence");
            arguments.add(hardwareDivergence);
        });

        return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0]));
    }

    /**
     * Invokes the external maintenance JVM (via sudo + maintenance.sh) with the
     * given main class and arguments; returns trimmed stdout/stderr.
     *
     * @throws RuntimeException on non-zero exit (also bumps the failure counter) or I/O failure
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo",
                        "VESPA_HOME=" + getDefaults().vespaHome(),
                        getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"),
                        mainClass),
                Stream.of(args))
                .toArray(String[]::new);

        try {
            Pair<Integer, String> result = processExecuter.exec(command);
            if (result.getFirst() != 0) {
                numberOfNodeAdminMaintenanceFails.add();
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }

    /**
     * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
     */
    private class MaintainerExecutor {
        private final List<MaintainerExecutorJob> jobs = new ArrayList<>();

        MaintainerExecutorJob addJob(String jobName) {
            MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
            jobs.add(job);
            return job;
        }

        void execute() {
            String args;
            try {
                // All queued jobs are serialized into a single JSON argument.
                args = objectMapper.writeValueAsString(jobs);
            } catch (JsonProcessingException e) {
                throw new RuntimeException("Failed transform list of maintenance jobs to JSON");
            }

            executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
        }
    }

    /** One maintenance job: a type plus named arguments, serialized to JSON via Jackson. */
    private class MaintainerExecutorJob {
        @JsonProperty(value="type")
        private final String type;

        @JsonProperty(value="arguments")
        private final Map<String, Object> arguments = new HashMap<>();

        MaintainerExecutorJob(String type) {
            this.type = type;
        }

        MaintainerExecutorJob withArgument(String argument, Object value) {
            // Path isn't serializable as expected by the maintenance JVM, so stringify it here.
            arguments.put(argument, (value instanceof Path) ? value.toString() : value);
            return this;
        }
    }

    /** Returns (creating on first use) the throttler for the given container. */
    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
        maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler());
        return maintenanceThrottlerByContainerName.get(containerName);
    }

    /** Tracks the earliest next-run instants for the periodic maintenance tasks of one container. */
    private class MaintenanceThrottler {
        private Instant nextRemoveOldFilesAt = Instant.EPOCH;
        private Instant nextHandleOldCoredumpsAt = Instant.EPOCH;

        void updateNextRemoveOldFilesTime() {
            nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
        }

        boolean shouldRemoveOldFilesNow() {
            return !nextRemoveOldFilesAt.isAfter(clock.instant());
        }

        void updateNextHandleCoredumpsTime() {
            nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
        }

        boolean shouldHandleCoredumpsNow() {
            return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
        }

        void reset() {
            // EPOCH means "run at the next opportunity".
            nextRemoveOldFilesAt = Instant.EPOCH;
            nextHandleOldCoredumpsAt = Instant.EPOCH;
        }
    }
}
/**
 * Performs maintenance of the storage that backs Docker containers: writes
 * monitoring/log-shipping config, measures disk usage, prunes old files,
 * handles coredumps and archives container data. Privileged or long-running
 * work is delegated to an external maintenance JVM started via sudo.
 */
class StorageMaintainer {
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    private static final ObjectMapper objectMapper = new ObjectMapper();

    private final GaugeWrapper numberOfCoredumpsOnHost;
    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    // One throttler per container; concurrent map because node agents run on separate threads.
    private final Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
                             MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(
                MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
        numberOfCoredumpsOnHost = metricReceiver.declareGauge(
                MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps");
    }

    /** Tags the given check with node/owner/membership metadata common to all checks. */
    private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) {
        check.withTag("namespace", "Vespa")
                .withTag("role", "tenants")
                .withTag("flavor", node.getFlavor())
                .withTag("canonicalFlavor", node.getCanonicalFlavor())
                .withTag("state", node.getState().toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());
        node.getOwner().ifPresent(owner -> check
                .withTag("tenantName", owner.getTenant())
                .withTag("app", owner.getApplication() + "." + owner.getInstance())
                .withTag("applicationName", owner.getApplication())
                .withTag("instanceName", owner.getInstance())
                .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance()));
        node.getMembership().ifPresent(membership -> check
                .withTag("clustertype", membership.getClusterType())
                .withTag("clusterid", membership.getClusterId()));
        node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version));
        return check;
    }

    /** Writes the filebeat (log shipping) config for the container. Best-effort: failures are only logged. */
    public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(node);
            if (!config.isPresent()) {
                logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString());
                return;
            }
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(
                    containerName, Paths.get("/etc/filebeat/filebeat.yml"));
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } catch (Throwable t) {
            logger.error("Failed writing filebeat config; " + node, t);
        }
    }

    /** Returns the container's disk usage in bytes, or empty if it could not be determined. */
    public Optional<Long> getDiskUsageFor(ContainerName containerName) {
        Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/"));
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
            logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Measures the disk usage of {@code path} in bytes by shelling out to {@code du -xsk}.
     * Returns 0 if the path does not exist.
     *
     * @throws RuntimeException if du times out (60 s) or produces unexpected output
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        if (!Files.exists(path)) {
            return 0;
        }

        final String[] command = {"du", "-xsk", path.toString()};
        Process duCommand = new ProcessBuilder().command(command).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }
        // du output is "<kilobytes>\t<path>"; use an explicit charset instead of the platform default.
        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream(),
                                                              java.nio.charset.StandardCharsets.UTF_8));
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }

        long diskUsageKB = Long.parseLong(results[0]); // parseLong avoids needless boxing
        return diskUsageKB * 1024;
    }

    /** Deletes old log files for vespa, nginx, logstash, etc. Throttled per container. */
    public void removeOldFilesFromNode(ContainerName containerName) {
        if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
    }

    /** Queues delete-files jobs for the container's known log directories. */
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        Path[] pathsToClean = {
                environment.pathInNodeUnderVespaHome("logs/elasticsearch2"),
                environment.pathInNodeUnderVespaHome("logs/logstash2"),
                environment.pathInNodeUnderVespaHome("logs/daemontools_y"),
                environment.pathInNodeUnderVespaHome("logs/nginx"),
                environment.pathInNodeUnderVespaHome("logs/vespa") };

        for (Path pathToClean : pathsToClean) {
            Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
            if (Files.exists(path)) {
                maintainerExecutor.addJob("delete-files")
                        .withArgument("basePath", path)
                        .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                        .withArgument("fileNameRegex", ".*\\.log.+")
                        .withArgument("recursive", false);
            }
        }

        Path qrsDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", qrsDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                .withArgument("recursive", false);

        Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", logArchiveDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
    }

    /**
     * Checks if container has any new coredumps, reports and archives them if so
     *
     * @param force Set to true to bypass throttling
     */
    public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) {
        // Best-effort metric sampling; the done-coredumps directory may not exist (yet).
        // try-with-resources closes the directory stream returned by Files.list.
        try (Stream<Path> doneCoredumps = Files.list(environment.pathInNodeAdminToDoneCoredumps())) {
            numberOfCoredumpsOnHost.sample(doneCoredumps.count());
        } catch (IOException ignored) {
            // Deliberately ignored: failing to sample the metric must not stop coredump handling.
        }

        if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
    }

    /** Queues a handle-core-dumps job, unless no coredump feed endpoint is configured. */
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) {
        if (!environment.getCoredumpFeedEndpoint().isPresent()) {
            // Coredump reporting is disabled in this environment.
            return;
        }

        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", node.getHostname());
        attributes.put("parent_hostname", environment.getParentHostHostname());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", node.getFlavor());
        attributes.put("kernel_version", System.getProperty("os.version"));

        node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString()));
        node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version));
        node.getOwner().ifPresent(owner -> {
            attributes.put("tenant", owner.getTenant());
            attributes.put("application", owner.getApplication());
            attributes.put("instance", owner.getInstance());
        });

        maintainerExecutor.addJob("handle-core-dumps")
                .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
                .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var/crash")))
                .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get())
                .withArgument("attributes", attributes);
    }

    /**
     * Deletes old
     *  * archived app data
     *  * Vespa logs
     *  * Filedistribution files
     */
    public void cleanNodeAdmin() {
        if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        maintainerExecutor.addJob("delete-directories")
                .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
                .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
                .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));

        Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", nodeAdminJDiskLogsPath)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
     * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty"
     */
    public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) {
        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        addArchiveNodeData(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).reset();
    }

    /** Queues jobs that delete var/ and move everything else to the cleanup area. */
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        maintainerExecutor.addJob("recursive-delete")
                .withArgument("path", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var")));

        maintainerExecutor.addJob("move-files")
                .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/")))
                .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    }

    /**
     * Runs node-maintainer's SpecVerifier and returns its output
     * @param node Node specification containing the excepted values we want to verify against
     * @return new combined hardware divergence
     * @throws RuntimeException if exit code != 0
     */
    public String getHardwareDivergence(NodeSpec node) {
        List<String> arguments = new ArrayList<>(Arrays.asList("specification",
                "--disk", Double.toString(node.getMinDiskAvailableGb()),
                "--memory", Double.toString(node.getMinMainMemoryAvailableGb()),
                "--cpu_cores", Double.toString(node.getMinCpuCores()),
                "--is_ssd", Boolean.toString(node.isFastDisk()),
                "--ips", String.join(",", node.getIpAddresses())));

        node.getHardwareDivergence().ifPresent(hardwareDivergence -> {
            arguments.add("--divergence");
            arguments.add(hardwareDivergence);
        });

        return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0]));
    }

    /**
     * Runs the given main class in the external maintenance JVM (via sudo) and returns its trimmed stdout/stderr.
     *
     * @throws RuntimeException if the process exits non-zero or cannot be started
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo",
                        "VESPA_HOME=" + getDefaults().vespaHome(),
                        getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"),
                        mainClass),
                Stream.of(args))
                .toArray(String[]::new);

        try {
            Pair<Integer, String> result = processExecuter.exec(command);
            if (result.getFirst() != 0) {
                numberOfNodeAdminMaintenanceFails.add();
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }

    /**
     * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
     */
    private class MaintainerExecutor {
        private final List<MaintainerExecutorJob> jobs = new ArrayList<>();

        MaintainerExecutorJob addJob(String jobName) {
            MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
            jobs.add(job);
            return job;
        }

        void execute() {
            String args;
            try {
                args = objectMapper.writeValueAsString(jobs);
            } catch (JsonProcessingException e) {
                // Preserve the cause so serialization failures can actually be diagnosed.
                throw new RuntimeException("Failed transform list of maintenance jobs to JSON", e);
            }

            executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
        }
    }

    /** A single maintenance job, serialized to JSON and handed to the maintenance JVM. */
    private class MaintainerExecutorJob {
        @JsonProperty(value="type")
        private final String type;

        @JsonProperty(value="arguments")
        private final Map<String, Object> arguments = new HashMap<>();

        MaintainerExecutorJob(String type) {
            this.type = type;
        }

        MaintainerExecutorJob withArgument(String argument, Object value) {
            // Paths are serialized as strings so the receiving JVM can decode them.
            arguments.put(argument, (value instanceof Path) ? value.toString() : value);
            return this;
        }
    }

    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
        // computeIfAbsent avoids allocating a throwaway MaintenanceThrottler on every lookup.
        return maintenanceThrottlerByContainerName.computeIfAbsent(containerName, name -> new MaintenanceThrottler());
    }

    /** Tracks the earliest next time each maintenance task may run for a container. */
    private class MaintenanceThrottler {
        private Instant nextRemoveOldFilesAt = Instant.EPOCH;
        private Instant nextHandleOldCoredumpsAt = Instant.EPOCH;

        void updateNextRemoveOldFilesTime() {
            nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
        }

        boolean shouldRemoveOldFilesNow() {
            return !nextRemoveOldFilesAt.isAfter(clock.instant());
        }

        void updateNextHandleCoredumpsTime() {
            nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
        }

        boolean shouldHandleCoredumpsNow() {
            return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
        }

        void reset() {
            nextRemoveOldFilesAt = Instant.EPOCH;
            nextHandleOldCoredumpsAt = Instant.EPOCH;
        }
    }
}
Yes
/**
 * Writes secret-agent (yamas) check configurations for the given container and
 * restarts the in-container yamas agent so the new checks take effect.
 * Config servers and proxies get additional node-type-specific checks.
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> configs = new ArrayList<>();

    Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa");
    SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all");
    configs.add(annotatedCheck(node, vespaSchedule));

    Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health");
    // Fixed check id: was "vespa", which collided with the vespa check above.
    SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all");
    configs.add(annotatedCheck(node, vespaHealthSchedule));

    Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life");
    SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath);
    configs.add(annotatedCheck(node, hostLifeSchedule));

    Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp");
    // Fixed check id: was "host-life" (copy-paste), which collided with the host-life check above.
    SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath);
    configs.add(annotatedCheck(node, ntpSchedule));

    Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps");
    // NOTE(review): hard-coded "/home/y" assumes the default vespa home -- consider
    // environment.pathInNodeUnderVespaHome("var/crash/processing") instead; confirm before changing.
    SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing",
            300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin",
            "129600", "--crit", "1", "--coredir", "/home/y/var/crash/processing");
    configs.add(annotatedCheck(node, coredumpSchedule));

    if (node.getNodeType() == NodeType.config) {
        Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2");
        SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver",
                60, configServerCheckPath, "-zero", "configserver");
        configs.add(annotatedCheck(node, configServerSchedule));

        Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage",
                300, zkbackupCheckPath, "-f", "/home/y/var/vespa-hosted/zkbackup.stat",
                "-m", "150", "-a", "config-zkbackupage");
        configs.add(annotatedCheck(node, zkbackupSchedule));
    }

    if (node.getNodeType() == NodeType.proxy) {
        Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        // Fixed -f argument: was "home/y/..." (relative path, missing the leading slash).
        SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage",
                60, routingAgeCheckPath, "-f", "/home/y/var/vespa-hosted/routing/nginx.conf",
                "-m", "90", "-a", "routing-configage");
        configs.add(annotatedCheck(node, routingAgeSchedule));

        Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status");
        // Fixed check id: was "zkbackupage" (copy-paste from the config-server block).
        SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status",
                300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30");
        configs.add(annotatedCheck(node, sslSchedule));
    }

    // Write all check definitions into the container's yamas-agent directory ...
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder)));

    // ... and restart the agent inside the container so it picks them up.
    final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
"129600", "--crit", "1", "--coredir", "/home/y/var/crash/processing");
/**
 * Writes secret-agent (yamas) check configurations for the given container and
 * restarts the in-container yamas agent so the new checks take effect.
 * The set of checks emitted depends on the node type (tenant, config, proxy).
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> checks = new ArrayList<>();

    // Checks common to all node types.
    checks.add(annotatedCheck(node, new SecretAgentCheckConfig(
            "host-life", 60,
            environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"))));
    checks.add(annotatedCheck(node, new SecretAgentCheckConfig(
            "ntp", 60,
            environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"))));
    checks.add(annotatedCheck(node, new SecretAgentCheckConfig(
            "system-coredumps-processing", 300,
            environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"),
            "--application", "system-coredumps-processing",
            "--lastmin", "129600", "--crit", "1",
            "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString())));

    if (node.getNodeType() != NodeType.config) {
        // Vespa service checks are skipped on config servers.
        checks.add(annotatedCheck(node, new SecretAgentCheckConfig(
                "vespa-health", 60,
                environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"), "all")));
        checks.add(annotatedCheck(node, new SecretAgentCheckConfig(
                "vespa", 60,
                environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"), "all")));
    }

    if (node.getNodeType() == NodeType.config) {
        checks.add(annotatedCheck(node, new SecretAgentCheckConfig(
                "configserver", 60,
                environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"),
                "-zero", "configserver")));
        checks.add(annotatedCheck(node, new SecretAgentCheckConfig(
                "zkbackupage", 300,
                environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"),
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(),
                "-m", "150", "-a", "config-zkbackupage")));
    }

    if (node.getNodeType() == NodeType.proxy) {
        checks.add(annotatedCheck(node, new SecretAgentCheckConfig(
                "routing-configage", 60,
                environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"),
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(),
                "-m", "90", "-a", "routing-configage")));
        checks.add(annotatedCheck(node, new SecretAgentCheckConfig(
                "ssl-status", 300,
                environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"),
                "-e", "localhost", "-p", "4443", "-t", "30")));
    }

    // Persist every check definition into the container's yamas-agent directory ...
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    checks.forEach(check -> IOExceptionUtil.uncheck(() -> check.writeTo(yamasAgentFolder)));

    // ... and restart the agent inside the container so it picks them up.
    final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
/**
 * Performs maintenance of the storage that backs Docker containers: writes
 * monitoring/log-shipping config, measures disk usage, prunes old files,
 * handles coredumps and archives container data. Privileged or long-running
 * work is delegated to an external maintenance JVM started via sudo.
 */
class StorageMaintainer {
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    private static final ObjectMapper objectMapper = new ObjectMapper();

    private final GaugeWrapper numberOfCoredumpsOnHost;
    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    // One throttler per container; concurrent map because node agents run on separate threads.
    private final Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
                             MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(
                MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
        numberOfCoredumpsOnHost = metricReceiver.declareGauge(
                MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps");
    }

    /** Tags the given check with node/owner/membership metadata common to all checks. */
    private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) {
        check.withTag("namespace", "Vespa")
                .withTag("role", "tenants")
                .withTag("flavor", node.getFlavor())
                .withTag("canonicalFlavor", node.getCanonicalFlavor())
                .withTag("state", node.getState().toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());
        node.getOwner().ifPresent(owner -> check
                .withTag("tenantName", owner.getTenant())
                .withTag("app", owner.getApplication() + "." + owner.getInstance())
                .withTag("applicationName", owner.getApplication())
                .withTag("instanceName", owner.getInstance())
                .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance()));
        node.getMembership().ifPresent(membership -> check
                .withTag("clustertype", membership.getClusterType())
                .withTag("clusterid", membership.getClusterId()));
        node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version));
        return check;
    }

    /** Writes the filebeat (log shipping) config for the container. Best-effort: failures are only logged. */
    public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(node);
            if (!config.isPresent()) {
                logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString());
                return;
            }
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(
                    containerName, Paths.get("/etc/filebeat/filebeat.yml"));
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } catch (Throwable t) {
            logger.error("Failed writing filebeat config; " + node, t);
        }
    }

    /** Returns the container's disk usage in bytes, or empty if it could not be determined. */
    public Optional<Long> getDiskUsageFor(ContainerName containerName) {
        Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/"));
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
            logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Measures the disk usage of {@code path} in bytes by shelling out to {@code du -xsk}.
     * Returns 0 if the path does not exist.
     *
     * @throws RuntimeException if du times out (60 s) or produces unexpected output
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        if (!Files.exists(path)) {
            return 0;
        }

        final String[] command = {"du", "-xsk", path.toString()};
        Process duCommand = new ProcessBuilder().command(command).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }
        // du output is "<kilobytes>\t<path>"; use an explicit charset instead of the platform default.
        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream(),
                                                              java.nio.charset.StandardCharsets.UTF_8));
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }

        long diskUsageKB = Long.parseLong(results[0]); // parseLong avoids needless boxing
        return diskUsageKB * 1024;
    }

    /** Deletes old log files for vespa, nginx, logstash, etc. Throttled per container. */
    public void removeOldFilesFromNode(ContainerName containerName) {
        if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
    }

    /** Queues delete-files jobs for the container's known log directories. */
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        Path[] pathsToClean = {
                environment.pathInNodeUnderVespaHome("logs/elasticsearch2"),
                environment.pathInNodeUnderVespaHome("logs/logstash2"),
                environment.pathInNodeUnderVespaHome("logs/daemontools_y"),
                environment.pathInNodeUnderVespaHome("logs/nginx"),
                environment.pathInNodeUnderVespaHome("logs/vespa") };

        for (Path pathToClean : pathsToClean) {
            Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
            if (Files.exists(path)) {
                maintainerExecutor.addJob("delete-files")
                        .withArgument("basePath", path)
                        .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                        .withArgument("fileNameRegex", ".*\\.log.+")
                        .withArgument("recursive", false);
            }
        }

        Path qrsDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", qrsDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                .withArgument("recursive", false);

        Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", logArchiveDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
    }

    /**
     * Checks if container has any new coredumps, reports and archives them if so
     *
     * @param force Set to true to bypass throttling
     */
    public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) {
        // NOTE(review): the directory stream returned by Files.list is never closed here,
        // which leaks a file handle per call -- consider try-with-resources inside the lambda
        // (depends on the functional interface IOExceptionUtil.uncheck accepts; confirm).
        IOExceptionUtil.uncheck(() ->
                numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count()));

        if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
    }

    /** Queues a handle-core-dumps job, unless no coredump feed endpoint is configured. */
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) {
        if (!environment.getCoredumpFeedEndpoint().isPresent()) {
            // Coredump reporting is disabled in this environment.
            return;
        }

        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", node.getHostname());
        attributes.put("parent_hostname", environment.getParentHostHostname());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", node.getFlavor());
        attributes.put("kernel_version", System.getProperty("os.version"));

        node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString()));
        node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version));
        node.getOwner().ifPresent(owner -> {
            attributes.put("tenant", owner.getTenant());
            attributes.put("application", owner.getApplication());
            attributes.put("instance", owner.getInstance());
        });

        maintainerExecutor.addJob("handle-core-dumps")
                .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
                .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var/crash")))
                .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get())
                .withArgument("attributes", attributes);
    }

    /**
     * Deletes old
     *  * archived app data
     *  * Vespa logs
     *  * Filedistribution files
     */
    public void cleanNodeAdmin() {
        if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        maintainerExecutor.addJob("delete-directories")
                .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
                .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
                .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));

        Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", nodeAdminJDiskLogsPath)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
     * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty"
     */
    public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) {
        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        addArchiveNodeData(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).reset();
    }

    /** Queues jobs that delete var/ and move everything else to the cleanup area. */
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        maintainerExecutor.addJob("recursive-delete")
                .withArgument("path", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var")));

        maintainerExecutor.addJob("move-files")
                .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/")))
                .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    }

    /**
     * Runs node-maintainer's SpecVerifier and returns its output
     * @param node Node specification containing the excepted values we want to verify against
     * @return new combined hardware divergence
     * @throws RuntimeException if exit code != 0
     */
    public String getHardwareDivergence(NodeSpec node) {
        List<String> arguments = new ArrayList<>(Arrays.asList("specification",
                "--disk", Double.toString(node.getMinDiskAvailableGb()),
                "--memory", Double.toString(node.getMinMainMemoryAvailableGb()),
                "--cpu_cores", Double.toString(node.getMinCpuCores()),
                "--is_ssd", Boolean.toString(node.isFastDisk()),
                "--ips", String.join(",", node.getIpAddresses())));

        node.getHardwareDivergence().ifPresent(hardwareDivergence -> {
            arguments.add("--divergence");
            arguments.add(hardwareDivergence);
        });

        return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0]));
    }

    /**
     * Runs the given main class in the external maintenance JVM (via sudo) and returns its trimmed stdout/stderr.
     *
     * @throws RuntimeException if the process exits non-zero or cannot be started
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo",
                        "VESPA_HOME=" + getDefaults().vespaHome(),
                        getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"),
                        mainClass),
                Stream.of(args))
                .toArray(String[]::new);

        try {
            Pair<Integer, String> result = processExecuter.exec(command);
            if (result.getFirst() != 0) {
                numberOfNodeAdminMaintenanceFails.add();
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }

    /**
     * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
     */
    private class MaintainerExecutor {
        private final List<MaintainerExecutorJob> jobs = new ArrayList<>();

        MaintainerExecutorJob addJob(String jobName) {
            MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
            jobs.add(job);
            return job;
        }

        void execute() {
            String args;
            try {
                args = objectMapper.writeValueAsString(jobs);
            } catch (JsonProcessingException e) {
                // Preserve the cause so serialization failures can actually be diagnosed.
                throw new RuntimeException("Failed transform list of maintenance jobs to JSON", e);
            }

            executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
        }
    }

    /** A single maintenance job, serialized to JSON and handed to the maintenance JVM. */
    private class MaintainerExecutorJob {
        @JsonProperty(value="type")
        private final String type;

        @JsonProperty(value="arguments")
        private final Map<String, Object> arguments = new HashMap<>();

        MaintainerExecutorJob(String type) {
            this.type = type;
        }

        MaintainerExecutorJob withArgument(String argument, Object value) {
            // Paths are serialized as strings so the receiving JVM can decode them.
            arguments.put(argument, (value instanceof Path) ? value.toString() : value);
            return this;
        }
    }

    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
        // computeIfAbsent avoids allocating a throwaway MaintenanceThrottler on every lookup.
        return maintenanceThrottlerByContainerName.computeIfAbsent(containerName, name -> new MaintenanceThrottler());
    }

    /** Tracks the earliest next time each maintenance task may run for a container. */
    private class MaintenanceThrottler {
        private Instant nextRemoveOldFilesAt = Instant.EPOCH;
        private Instant nextHandleOldCoredumpsAt = Instant.EPOCH;

        void updateNextRemoveOldFilesTime() {
            nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
        }

        boolean shouldRemoveOldFilesNow() {
            return !nextRemoveOldFilesAt.isAfter(clock.instant());
        }

        void updateNextHandleCoredumpsTime() {
            nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
        }

        boolean shouldHandleCoredumpsNow() {
            return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
        }

        void reset() {
            nextRemoveOldFilesAt = Instant.EPOCH;
            nextHandleOldCoredumpsAt = Instant.EPOCH;
        }
    }
}
/**
 * Performs storage-related maintenance for Docker containers and for the node-admin itself:
 * writes filebeat config, measures disk usage, deletes old log/application files, reports and
 * archives coredumps, and archives container data when a node is recycled.
 *
 * <p>Destructive file operations are not performed in-process: they are queued as jobs
 * (see {@link MaintainerExecutor}) and handed as JSON to a separate maintenance JVM started
 * via {@code maintenance.sh} under sudo.
 *
 * <p>Per-container throttling (see {@link MaintenanceThrottler}) keeps the expensive jobs from
 * running on every tick.
 */
class StorageMaintainer {
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    // Shared mapper used only to serialize job lists; safe to share since it is never reconfigured.
    private static final ObjectMapper objectMapper = new ObjectMapper();

    private final GaugeWrapper numberOfCoredumpsOnHost;
    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    // FIX: never reassigned, so make it final. ConcurrentHashMap because node agents run in
    // separate threads and may create throttlers concurrently.
    private final Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();


    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
                             MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER,
                dimensions, "nodes.maintenance.fails");
        numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER,
                dimensions, "nodes.coredumps");
    }

    /**
     * Decorates a secret-agent check with the standard set of tags (zone, flavor, owner,
     * membership, vespa version) derived from the node spec and environment.
     *
     * @return the same {@code check} instance, for chaining
     */
    private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) {
        check.withTag("namespace", "Vespa")
                .withTag("role", "tenants")
                .withTag("flavor", node.getFlavor())
                .withTag("canonicalFlavor", node.getCanonicalFlavor())
                .withTag("state", node.getState().toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());
        node.getOwner().ifPresent(owner -> check
                .withTag("tenantName", owner.getTenant())
                .withTag("app", owner.getApplication() + "." + owner.getInstance())
                .withTag("applicationName", owner.getApplication())
                .withTag("instanceName", owner.getInstance())
                .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance()));
        node.getMembership().ifPresent(membership -> check
                .withTag("clustertype", membership.getClusterType())
                .withTag("clusterid", membership.getClusterId()));
        node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version));

        return check;
    }

    /**
     * Generates and writes the filebeat configuration for the container. Deliberately
     * best-effort: any failure is logged and swallowed so that config generation problems
     * never stop node agent convergence.
     */
    public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(node);
            if (!config.isPresent()) {
                logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString());
                return;
            }
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(
                    containerName, Paths.get("/etc/filebeat/filebeat.yml"));
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } catch (Throwable t) {
            logger.error("Failed writing filebeat config; " + node, t);
        }
    }

    /**
     * Returns the disk usage of the container's storage directory in bytes,
     * or empty if the measurement failed (the failure is logged).
     */
    public Optional<Long> getDiskUsageFor(ContainerName containerName) {
        Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/"));
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
            logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Measures disk usage of {@code path} by shelling out to {@code du -xsk} (single filesystem,
     * summary, KiB units). Package-private for testing.
     *
     * @return bytes used, or 0 if the path does not exist
     * @throws RuntimeException if du does not finish within 60 seconds or produces unexpected output
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        if (!Files.exists(path)) {
            return 0;
        }

        final String[] command = {"du", "-xsk", path.toString()};

        Process duCommand = new ProcessBuilder().command(command).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }
        // NOTE(review): InputStreamReader uses the platform default charset here; du output is
        // ASCII in practice, but consider passing an explicit charset.
        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }

        // FIX: parseLong avoids the needless boxing of Long.valueOf.
        long diskUsageKB = Long.parseLong(results[0]);
        return diskUsageKB * 1024;
    }

    /** Deletes old log files for vespa, nginx, logstash, etc., subject to throttling. */
    public void removeOldFilesFromNode(ContainerName containerName) {
        if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
    }

    /** Queues deletion jobs for the container's rotated logs and old filedistribution files. */
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        Path[] pathsToClean = {
                environment.pathInNodeUnderVespaHome("logs/elasticsearch2"),
                environment.pathInNodeUnderVespaHome("logs/logstash2"),
                environment.pathInNodeUnderVespaHome("logs/daemontools_y"),
                environment.pathInNodeUnderVespaHome("logs/nginx"),
                environment.pathInNodeUnderVespaHome("logs/vespa")
        };

        for (Path pathToClean : pathsToClean) {
            Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
            if (Files.exists(path)) {
                // Rotated logs only (".log" followed by a suffix), so the live log file is kept.
                maintainerExecutor.addJob("delete-files")
                        .withArgument("basePath", path)
                        .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                        .withArgument("fileNameRegex", ".*\\.log.+")
                        .withArgument("recursive", false);
            }
        }

        Path qrsDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", qrsDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                .withArgument("recursive", false);

        Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", logArchiveDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
    }

    /**
     * Checks if container has any new coredumps, reports and archives them if so
     *
     * @param force Set to true to bypass throttling
     */
    public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) {
        // FIX: Files.list returns a stream backed by an open directory handle; close it with
        // try-with-resources instead of leaking it. Sampling stays best-effort: a failure here
        // must not prevent the actual coredump handling below, hence the deliberately ignored
        // exception (previously an empty catch block).
        try (Stream<Path> doneCoredumps = Files.list(environment.pathInNodeAdminToDoneCoredumps())) {
            numberOfCoredumpsOnHost.sample(doneCoredumps.count());
        } catch (IOException ignored) {
            // Best effort metric sampling only.
        }

        if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
    }

    /**
     * Queues a "handle-core-dumps" job that reports new coredumps to the coredump feed endpoint
     * together with node metadata. No-op when no feed endpoint is configured (e.g. self-hosted).
     */
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) {
        if (!environment.getCoredumpFeedEndpoint().isPresent()) {
            // Core dump handling is disabled.
            return;
        }

        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", node.getHostname());
        attributes.put("parent_hostname", environment.getParentHostHostname());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", node.getFlavor());
        attributes.put("kernel_version", System.getProperty("os.version"));

        node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString()));
        node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version));
        node.getOwner().ifPresent(owner -> {
            attributes.put("tenant", owner.getTenant());
            attributes.put("application", owner.getApplication());
            attributes.put("instance", owner.getInstance());
        });

        maintainerExecutor.addJob("handle-core-dumps")
                .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
                .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var/crash")))
                .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get())
                .withArgument("attributes", attributes);
    }

    /**
     * Deletes old
     *  * archived app data
     *  * Vespa logs
     *  * Filedistribution files
     */
    public void cleanNodeAdmin() {
        if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        maintainerExecutor.addJob("delete-directories")
                .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
                .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
                .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));

        Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", nodeAdminJDiskLogsPath)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
     * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty"
     */
    public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) {
        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        addArchiveNodeData(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).reset();
    }

    /** Queues deletion of the container's var/ data and archival of the rest into the cleanup area. */
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        maintainerExecutor.addJob("recursive-delete")
                .withArgument("path", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var")));

        maintainerExecutor.addJob("move-files")
                .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/")))
                .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    }

    /**
     * Runs node-maintainer's SpecVerifier and returns its output
     * @param node Node specification containing the expected values we want to verify against
     * @return new combined hardware divergence
     * @throws RuntimeException if exit code != 0
     */
    public String getHardwareDivergence(NodeSpec node) {
        List<String> arguments = new ArrayList<>(Arrays.asList("specification",
                "--disk", Double.toString(node.getMinDiskAvailableGb()),
                "--memory", Double.toString(node.getMinMainMemoryAvailableGb()),
                "--cpu_cores", Double.toString(node.getMinCpuCores()),
                "--is_ssd", Boolean.toString(node.isFastDisk()),
                "--ips", String.join(",", node.getIpAddresses())));

        node.getHardwareDivergence().ifPresent(hardwareDivergence -> {
            arguments.add("--divergence");
            arguments.add(hardwareDivergence);
        });

        return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0]));
    }

    /**
     * Invokes maintenance.sh under sudo with the given main class and arguments, returning its
     * trimmed stdout/stderr.
     *
     * @throws RuntimeException if the process exits non-zero (also bumps the failure counter)
     *                          or cannot be started
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo",
                        "VESPA_HOME=" + getDefaults().vespaHome(),
                        getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"),
                        mainClass),
                Stream.of(args))
                .toArray(String[]::new);

        try {
            Pair<Integer, String> result = processExecuter.exec(command);

            if (result.getFirst() != 0) {
                numberOfNodeAdminMaintenanceFails.add();
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }

    /**
     * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
     */
    private class MaintainerExecutor {
        private final List<MaintainerExecutorJob> jobs = new ArrayList<>();

        MaintainerExecutorJob addJob(String jobName) {
            MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
            jobs.add(job);
            return job;
        }

        void execute() {
            String args;
            try {
                args = objectMapper.writeValueAsString(jobs);
            } catch (JsonProcessingException e) {
                // FIX: preserve the cause so serialization failures are diagnosable.
                throw new RuntimeException("Failed transform list of maintenance jobs to JSON", e);
            }

            executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
        }
    }

    /** A single serializable maintenance job: a type plus a map of named arguments. */
    // FIX: static — this class never references the enclosing StorageMaintainer instance,
    // so keeping it non-static would only hold a needless reference to the outer object.
    private static class MaintainerExecutorJob {
        @JsonProperty(value="type")
        private final String type;

        @JsonProperty(value="arguments")
        private final Map<String, Object> arguments = new HashMap<>();

        MaintainerExecutorJob(String type) {
            this.type = type;
        }

        MaintainerExecutorJob withArgument(String argument, Object value) {
            // Paths are serialized as plain strings for the maintenance JVM.
            arguments.put(argument, (value instanceof Path) ? value.toString() : value);
            return this;
        }
    }

    /** Returns (creating on first use) the throttler for the given container. */
    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
        // FIX: computeIfAbsent avoids constructing a throwaway MaintenanceThrottler on every call
        // (putIfAbsent always evaluated its argument) and is a single atomic map operation.
        return maintenanceThrottlerByContainerName.computeIfAbsent(containerName, name -> new MaintenanceThrottler());
    }

    /**
     * Tracks the earliest next time the expensive file-removal and coredump jobs may run.
     * Instant.EPOCH means "run immediately".
     */
    private class MaintenanceThrottler {
        private Instant nextRemoveOldFilesAt = Instant.EPOCH;
        private Instant nextHandleOldCoredumpsAt = Instant.EPOCH;

        void updateNextRemoveOldFilesTime() {
            nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
        }

        boolean shouldRemoveOldFilesNow() {
            return !nextRemoveOldFilesAt.isAfter(clock.instant());
        }

        void updateNextHandleCoredumpsTime() {
            nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
        }

        boolean shouldHandleCoredumpsNow() {
            return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
        }

        void reset() {
            nextRemoveOldFilesAt = Instant.EPOCH;
            nextHandleOldCoredumpsAt = Instant.EPOCH;
        }
    }
}
Oops! I meant `yms_check_vespa` and **`yms_check_vespa_health`**. Both seem to use the metrics proxy:

```
[root@cfg1:/]$ /home/y/libexec/yms/yms_check_vespa_health all
{
   "status_code" : 1,
   "timestamp" : 1527683413,
   "application" : "all",
   "status_msg" : "Failed to get data from metrics proxy: error(104): (RPC) Connection error"
}

[root@cfg1:/]$ /home/y/libexec/yms/yms_check_vespa all
{
   "status_code" : 1,
   "timestamp" : 1527683446,
   "application" : "all",
   "status_msg" : "Failed to get data from metrics proxy: error(104): (RPC) Connection error"
}
```
/**
 * Writes the secret-agent (yamas) monitoring check configurations for the given container and
 * restarts the yamas agent inside it so the new checks take effect.
 *
 * FIXES relative to the previous revision:
 *  - the ntp check was mis-named "host-life" and the ssl check "zkbackupage" (copy-paste errors);
 *  - the routing nginx.conf path was the relative "home/y/..." (missing leading slash);
 *  - hard-coded "/home/y/..." paths are replaced by environment.pathInNodeUnderVespaHome so the
 *    vespa home is resolved consistently with the rest of this class;
 *  - the vespa and vespa-health checks are skipped on config servers: they query the metrics
 *    proxy, which does not run there, and always failed with "(RPC) Connection error";
 *  - the vespa health check got its own id "vespa-health" instead of duplicating "vespa".
 *
 * @param containerName container whose /etc/yamas-agent directory receives the check files
 * @param node          node spec used to annotate every check with standard tags
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> configs = new ArrayList<>();

    // Host-life and ntp checks apply to every node type.
    Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life");
    SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath);
    configs.add(annotatedCheck(node, hostLifeSchedule));

    Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp");
    SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath);
    configs.add(annotatedCheck(node, ntpSchedule));

    Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps");
    SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing",
            300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin",
            "129600", "--crit", "1", "--coredir",
            environment.pathInNodeUnderVespaHome("var/crash/processing").toString());
    configs.add(annotatedCheck(node, coredumpSchedule));

    if (node.getNodeType() != NodeType.config) {
        // Both of these talk to the metrics proxy, which is not running on config servers.
        Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health");
        SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all");
        configs.add(annotatedCheck(node, vespaHealthSchedule));

        Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa");
        SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all");
        configs.add(annotatedCheck(node, vespaSchedule));
    }

    if (node.getNodeType() == NodeType.config) {
        Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2");
        SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60,
                configServerCheckPath, "-zero", "configserver");
        configs.add(annotatedCheck(node, configServerSchedule));

        Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300,
                zkbackupCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(),
                "-m", "150", "-a", "config-zkbackupage");
        configs.add(annotatedCheck(node, zkbackupSchedule));
    }

    if (node.getNodeType() == NodeType.proxy) {
        Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60,
                routingAgeCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(),
                "-m", "90", "-a", "routing-configage");
        configs.add(annotatedCheck(node, routingAgeSchedule));

        Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status");
        SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300,
                sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30");
        configs.add(annotatedCheck(node, sslSchedule));
    }

    // Write the check definitions into the container, then restart the agent to pick them up.
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder)));
    final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
/**
 * Writes the secret-agent (yamas) monitoring check configurations for the given container and
 * restarts the yamas agent inside it so the new checks take effect.
 *
 * <p>Host-life, ntp and coredump checks are installed on every node type; metrics-proxy-backed
 * vespa checks on all non-config nodes; configserver/zookeeper-backup checks on config servers;
 * routing-config and ssl checks on proxy nodes.
 *
 * @param containerName container whose /etc/yamas-agent directory receives the check files
 * @param node          node spec used to annotate every check with standard tags
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> checks = new ArrayList<>();

    // Checks installed on every node type.
    checks.add(annotatedCheck(node, new SecretAgentCheckConfig("host-life", 60,
            environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"))));

    checks.add(annotatedCheck(node, new SecretAgentCheckConfig("ntp", 60,
            environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"))));

    checks.add(annotatedCheck(node, new SecretAgentCheckConfig("system-coredumps-processing", 300,
            environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"),
            "--application", "system-coredumps-processing",
            "--lastmin", "129600",
            "--crit", "1",
            "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString())));

    if (node.getNodeType() != NodeType.config) {
        // These query the metrics proxy, which is not running on config servers.
        checks.add(annotatedCheck(node, new SecretAgentCheckConfig("vespa-health", 60,
                environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"), "all")));

        checks.add(annotatedCheck(node, new SecretAgentCheckConfig("vespa", 60,
                environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"), "all")));
    }

    if (node.getNodeType() == NodeType.config) {
        checks.add(annotatedCheck(node, new SecretAgentCheckConfig("configserver", 60,
                environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"),
                "-zero", "configserver")));

        checks.add(annotatedCheck(node, new SecretAgentCheckConfig("zkbackupage", 300,
                environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"),
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(),
                "-m", "150",
                "-a", "config-zkbackupage")));
    }

    if (node.getNodeType() == NodeType.proxy) {
        checks.add(annotatedCheck(node, new SecretAgentCheckConfig("routing-configage", 60,
                environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"),
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(),
                "-m", "90",
                "-a", "routing-configage")));

        checks.add(annotatedCheck(node, new SecretAgentCheckConfig("ssl-status", 300,
                environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"),
                "-e", "localhost",
                "-p", "4443",
                "-t", "30")));
    }

    // Write every check definition into the container, then restart the agent to pick them up.
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    checks.forEach(check -> IOExceptionUtil.uncheck(() -> check.writeTo(yamasAgentFolder)));

    String[] restartYamasAgent = {"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
/**
 * Performs storage-related maintenance for tenant containers and for the node-admin
 * container itself: writes monitoring and filebeat configuration into containers,
 * measures disk usage, deletes old log files, processes/archives coredumps, archives
 * container data when a node is recycled, and runs the node-maintainer spec verifier.
 *
 * Maintenance work is queued on a {@link MaintainerExecutor} and handed as one JSON
 * payload to a separate maintenance JVM started through maintenance.sh (via sudo),
 * since the jobs need root privileges on the host.
 */
class StorageMaintainer {
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    private static final ObjectMapper objectMapper = new ObjectMapper();

    // Gauge: coredumps already processed on this host; counter: failed maintenance runs.
    private final GaugeWrapper numberOfCoredumpsOnHost;
    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    // Per-container throttle state; ConcurrentHashMap because node-agents run in parallel threads.
    private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
                             MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(
                MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
        numberOfCoredumpsOnHost = metricReceiver.declareGauge(
                MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps");
    }

    /** Tags the given check with the standard yamas dimensions derived from the node spec. */
    private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) {
        check.withTag("namespace", "Vespa")
                .withTag("role", "tenants")
                .withTag("flavor", node.getFlavor())
                .withTag("canonicalFlavor", node.getCanonicalFlavor())
                .withTag("state", node.getState().toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());
        node.getOwner().ifPresent(owner -> check
                .withTag("tenantName", owner.getTenant())
                .withTag("app", owner.getApplication() + "." + owner.getInstance())
                .withTag("applicationName", owner.getApplication())
                .withTag("instanceName", owner.getInstance())
                .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance()));
        node.getMembership().ifPresent(membership -> check
                .withTag("clustertype", membership.getClusterType())
                .withTag("clusterid", membership.getClusterId()));
        node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version));
        return check;
    }

    /** Writes the filebeat configuration for the node into the container. Best effort: errors are logged, not thrown. */
    public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(node);
            if (!config.isPresent()) {
                logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString());
                return;
            }
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(
                    containerName, Paths.get("/etc/filebeat/filebeat.yml"));
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } catch (Throwable t) {
            logger.error("Failed writing filebeat config; " + node, t);
        }
    }

    /** Returns disk usage in bytes under the container's /home, or empty if the calculation failed. */
    public Optional<Long> getDiskUsageFor(ContainerName containerName) {
        Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/"));
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
            logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Runs `du -xsk` on the path and converts the result to bytes.
     * Returns 0 if the path does not exist; times out after 60 seconds.
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        if (!Files.exists(path)) {
            return 0;
        }

        final String[] command = {"du", "-xsk", path.toString()};
        Process duCommand = new ProcessBuilder().command(command).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }

        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }

        // du reports kilobytes (the -k flag); convert to bytes.
        long diskUsageKB = Long.valueOf(results[0]);
        return diskUsageKB * 1024;
    }

    /** Deletes old log files for vespa, nginx, logstash, etc. Throttled to at most once per hour per container. */
    public void removeOldFilesFromNode(ContainerName containerName) {
        if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
    }

    /** Queues deletion jobs for the container's old log and filedistribution files. */
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        Path[] pathsToClean = {
                environment.pathInNodeUnderVespaHome("logs/elasticsearch2"),
                environment.pathInNodeUnderVespaHome("logs/logstash2"),
                environment.pathInNodeUnderVespaHome("logs/daemontools_y"),
                environment.pathInNodeUnderVespaHome("logs/nginx"),
                environment.pathInNodeUnderVespaHome("logs/vespa")
        };

        for (Path pathToClean : pathsToClean) {
            Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
            if (Files.exists(path)) {
                maintainerExecutor.addJob("delete-files")
                        .withArgument("basePath", path)
                        .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                        .withArgument("fileNameRegex", ".*\\.log.+")
                        .withArgument("recursive", false);
            }
        }

        Path qrsDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", qrsDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                .withArgument("recursive", false);

        Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", logArchiveDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
    }

    /**
     * Checks if container has any new coredumps, reports and archives them if so
     *
     * @param force Set to true to bypass throttling
     */
    public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) {
        // FIX: Files.list() returns a Stream that holds an open directory handle; it must be
        // closed (try-with-resources), otherwise each invocation leaks a file descriptor.
        try (Stream<Path> doneCoredumps = Files.list(environment.pathInNodeAdminToDoneCoredumps())) {
            numberOfCoredumpsOnHost.sample(doneCoredumps.count());
        } catch (IOException e) {
            // Same fail-fast semantics as the previous IOExceptionUtil.uncheck(...), with the cause preserved.
            throw new RuntimeException("Failed to count already processed coredumps", e);
        }

        if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
    }

    /** Queues a coredump-processing job; no-op when no feed endpoint is configured. */
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) {
        if (!environment.getCoredumpFeedEndpoint().isPresent()) {
            // No endpoint to feed the coredumps to; skip processing.
            return;
        }

        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", node.getHostname());
        attributes.put("parent_hostname", environment.getParentHostHostname());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", node.getFlavor());
        attributes.put("kernel_version", System.getProperty("os.version"));

        node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString()));
        node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version));
        node.getOwner().ifPresent(owner -> {
            attributes.put("tenant", owner.getTenant());
            attributes.put("application", owner.getApplication());
            attributes.put("instance", owner.getInstance());
        });

        maintainerExecutor.addJob("handle-core-dumps")
                .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
                .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var/crash")))
                .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get())
                .withArgument("attributes", attributes);
    }

    /**
     * Deletes old
     *  * archived app data
     *  * Vespa logs
     *  * Filedistribution files
     */
    public void cleanNodeAdmin() {
        if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        maintainerExecutor.addJob("delete-directories")
                .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
                .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
                .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));

        Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", nodeAdminJDiskLogsPath)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
     * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty"
     */
    public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) {
        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        addArchiveNodeData(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).reset();
    }

    /** Queues deletion of the container's var directory and archiving of the rest of its data. */
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        maintainerExecutor.addJob("recursive-delete")
                .withArgument("path", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var")));

        maintainerExecutor.addJob("move-files")
                .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/")))
                .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    }

    /**
     * Runs node-maintainer's SpecVerifier and returns its output
     * @param node Node specification containing the excepted values we want to verify against
     * @return new combined hardware divergence
     * @throws RuntimeException if exit code != 0
     */
    public String getHardwareDivergence(NodeSpec node) {
        List<String> arguments = new ArrayList<>(Arrays.asList("specification",
                "--disk", Double.toString(node.getMinDiskAvailableGb()),
                "--memory", Double.toString(node.getMinMainMemoryAvailableGb()),
                "--cpu_cores", Double.toString(node.getMinCpuCores()),
                "--is_ssd", Boolean.toString(node.isFastDisk()),
                "--ips", String.join(",", node.getIpAddresses())));

        node.getHardwareDivergence().ifPresent(hardwareDivergence -> {
            arguments.add("--divergence");
            arguments.add(hardwareDivergence);
        });

        return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0]));
    }

    /**
     * Invokes the maintenance JVM (maintenance.sh via sudo) with the given main class
     * and arguments, returning its trimmed stdout/stderr.
     * @throws RuntimeException if the process exits with a non-zero status or cannot be started
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo",
                        "VESPA_HOME=" + getDefaults().vespaHome(),
                        getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"),
                        mainClass),
                Stream.of(args))
                .toArray(String[]::new);

        try {
            Pair<Integer, String> result = processExecuter.exec(command);
            if (result.getFirst() != 0) {
                numberOfNodeAdminMaintenanceFails.add();
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }

    /**
     * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
     */
    private class MaintainerExecutor {
        private final List<MaintainerExecutorJob> jobs = new ArrayList<>();

        MaintainerExecutorJob addJob(String jobName) {
            MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
            jobs.add(job);
            return job;
        }

        void execute() {
            String args;
            try {
                args = objectMapper.writeValueAsString(jobs);
            } catch (JsonProcessingException e) {
                // FIX: preserve the cause instead of discarding it.
                throw new RuntimeException("Failed transform list of maintenance jobs to JSON", e);
            }

            executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
        }
    }

    /** A single maintenance job, serialized to JSON by Jackson. */
    private class MaintainerExecutorJob {
        @JsonProperty(value="type")
        private final String type;

        @JsonProperty(value="arguments")
        private final Map<String, Object> arguments = new HashMap<>();

        MaintainerExecutorJob(String type) {
            this.type = type;
        }

        MaintainerExecutorJob withArgument(String argument, Object value) {
            // Paths are stored as strings so the job serializes cleanly to JSON.
            arguments.put(argument, (value instanceof Path) ? value.toString() : value);
            return this;
        }
    }

    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
        maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler());
        return maintenanceThrottlerByContainerName.get(containerName);
    }

    /** Tracks the earliest next allowed run time for the throttled maintenance operations. */
    private class MaintenanceThrottler {
        private Instant nextRemoveOldFilesAt = Instant.EPOCH;
        private Instant nextHandleOldCoredumpsAt = Instant.EPOCH;

        void updateNextRemoveOldFilesTime() {
            nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
        }

        boolean shouldRemoveOldFilesNow() {
            return !nextRemoveOldFilesAt.isAfter(clock.instant());
        }

        void updateNextHandleCoredumpsTime() {
            nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
        }

        boolean shouldHandleCoredumpsNow() {
            return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
        }

        void reset() {
            nextRemoveOldFilesAt = Instant.EPOCH;
            nextHandleOldCoredumpsAt = Instant.EPOCH;
        }
    }
}
/**
 * Performs storage-related maintenance for tenant containers and for the node-admin
 * container itself: writes monitoring and filebeat configuration into containers,
 * measures disk usage, deletes old log files, processes/archives coredumps, archives
 * container data when a node is recycled, and runs the node-maintainer spec verifier.
 *
 * Maintenance work is queued on a {@link MaintainerExecutor} and handed as one JSON
 * payload to a separate maintenance JVM started through maintenance.sh (via sudo),
 * since the jobs need root privileges on the host.
 */
class StorageMaintainer {
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    private static final ObjectMapper objectMapper = new ObjectMapper();

    // Gauge: coredumps already processed on this host; counter: failed maintenance runs.
    private final GaugeWrapper numberOfCoredumpsOnHost;
    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    // Per-container throttle state; ConcurrentHashMap because node-agents run in parallel threads.
    private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
                             MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(
                MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
        numberOfCoredumpsOnHost = metricReceiver.declareGauge(
                MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps");
    }

    /** Tags the given check with the standard yamas dimensions derived from the node spec. */
    private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) {
        check.withTag("namespace", "Vespa")
                .withTag("role", "tenants")
                .withTag("flavor", node.getFlavor())
                .withTag("canonicalFlavor", node.getCanonicalFlavor())
                .withTag("state", node.getState().toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());
        node.getOwner().ifPresent(owner -> check
                .withTag("tenantName", owner.getTenant())
                .withTag("app", owner.getApplication() + "." + owner.getInstance())
                .withTag("applicationName", owner.getApplication())
                .withTag("instanceName", owner.getInstance())
                .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance()));
        node.getMembership().ifPresent(membership -> check
                .withTag("clustertype", membership.getClusterType())
                .withTag("clusterid", membership.getClusterId()));
        node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version));
        return check;
    }

    /** Writes the filebeat configuration for the node into the container. Best effort: errors are logged, not thrown. */
    public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(node);
            if (!config.isPresent()) {
                logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString());
                return;
            }
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(
                    containerName, Paths.get("/etc/filebeat/filebeat.yml"));
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } catch (Throwable t) {
            logger.error("Failed writing filebeat config; " + node, t);
        }
    }

    /** Returns disk usage in bytes under the container's /home, or empty if the calculation failed. */
    public Optional<Long> getDiskUsageFor(ContainerName containerName) {
        Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/"));
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
            logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Runs `du -xsk` on the path and converts the result to bytes.
     * Returns 0 if the path does not exist; times out after 60 seconds.
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        if (!Files.exists(path)) {
            return 0;
        }

        final String[] command = {"du", "-xsk", path.toString()};
        Process duCommand = new ProcessBuilder().command(command).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }

        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }

        // du reports kilobytes (the -k flag); convert to bytes.
        long diskUsageKB = Long.valueOf(results[0]);
        return diskUsageKB * 1024;
    }

    /** Deletes old log files for vespa, nginx, logstash, etc. Throttled to at most once per hour per container. */
    public void removeOldFilesFromNode(ContainerName containerName) {
        if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
    }

    /** Queues deletion jobs for the container's old log and filedistribution files. */
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        Path[] pathsToClean = {
                environment.pathInNodeUnderVespaHome("logs/elasticsearch2"),
                environment.pathInNodeUnderVespaHome("logs/logstash2"),
                environment.pathInNodeUnderVespaHome("logs/daemontools_y"),
                environment.pathInNodeUnderVespaHome("logs/nginx"),
                environment.pathInNodeUnderVespaHome("logs/vespa")
        };

        for (Path pathToClean : pathsToClean) {
            Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
            if (Files.exists(path)) {
                maintainerExecutor.addJob("delete-files")
                        .withArgument("basePath", path)
                        .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                        .withArgument("fileNameRegex", ".*\\.log.+")
                        .withArgument("recursive", false);
            }
        }

        Path qrsDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", qrsDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                .withArgument("recursive", false);

        Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", logArchiveDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
    }

    /**
     * Checks if container has any new coredumps, reports and archives them if so
     *
     * @param force Set to true to bypass throttling
     */
    public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) {
        // FIX: the metric update used an empty catch that silently swallowed IOException, and
        // never closed the Stream returned by Files.list() (which holds an open directory
        // handle). The metric stays best-effort, but failures are now logged and the stream
        // is closed via try-with-resources.
        try (Stream<Path> doneCoredumps = Files.list(environment.pathInNodeAdminToDoneCoredumps())) {
            numberOfCoredumpsOnHost.sample(doneCoredumps.count());
        } catch (IOException e) {
            PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName)
                    .error("Failed to count already processed coredumps", e);
        }

        if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
    }

    /** Queues a coredump-processing job; no-op when no feed endpoint is configured. */
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) {
        if (!environment.getCoredumpFeedEndpoint().isPresent()) {
            // No endpoint to feed the coredumps to; skip processing.
            return;
        }

        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", node.getHostname());
        attributes.put("parent_hostname", environment.getParentHostHostname());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", node.getFlavor());
        attributes.put("kernel_version", System.getProperty("os.version"));

        node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString()));
        node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version));
        node.getOwner().ifPresent(owner -> {
            attributes.put("tenant", owner.getTenant());
            attributes.put("application", owner.getApplication());
            attributes.put("instance", owner.getInstance());
        });

        maintainerExecutor.addJob("handle-core-dumps")
                .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
                .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var/crash")))
                .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get())
                .withArgument("attributes", attributes);
    }

    /**
     * Deletes old
     *  * archived app data
     *  * Vespa logs
     *  * Filedistribution files
     */
    public void cleanNodeAdmin() {
        if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        maintainerExecutor.addJob("delete-directories")
                .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
                .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
                .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));

        Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", nodeAdminJDiskLogsPath)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
     * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty"
     */
    public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) {
        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        addArchiveNodeData(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).reset();
    }

    /** Queues deletion of the container's var directory and archiving of the rest of its data. */
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        maintainerExecutor.addJob("recursive-delete")
                .withArgument("path", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var")));

        maintainerExecutor.addJob("move-files")
                .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/")))
                .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    }

    /**
     * Runs node-maintainer's SpecVerifier and returns its output
     * @param node Node specification containing the excepted values we want to verify against
     * @return new combined hardware divergence
     * @throws RuntimeException if exit code != 0
     */
    public String getHardwareDivergence(NodeSpec node) {
        List<String> arguments = new ArrayList<>(Arrays.asList("specification",
                "--disk", Double.toString(node.getMinDiskAvailableGb()),
                "--memory", Double.toString(node.getMinMainMemoryAvailableGb()),
                "--cpu_cores", Double.toString(node.getMinCpuCores()),
                "--is_ssd", Boolean.toString(node.isFastDisk()),
                "--ips", String.join(",", node.getIpAddresses())));

        node.getHardwareDivergence().ifPresent(hardwareDivergence -> {
            arguments.add("--divergence");
            arguments.add(hardwareDivergence);
        });

        return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0]));
    }

    /**
     * Invokes the maintenance JVM (maintenance.sh via sudo) with the given main class
     * and arguments, returning its trimmed stdout/stderr.
     * @throws RuntimeException if the process exits with a non-zero status or cannot be started
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo",
                        "VESPA_HOME=" + getDefaults().vespaHome(),
                        getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"),
                        mainClass),
                Stream.of(args))
                .toArray(String[]::new);

        try {
            Pair<Integer, String> result = processExecuter.exec(command);
            if (result.getFirst() != 0) {
                numberOfNodeAdminMaintenanceFails.add();
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }

    /**
     * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
     */
    private class MaintainerExecutor {
        private final List<MaintainerExecutorJob> jobs = new ArrayList<>();

        MaintainerExecutorJob addJob(String jobName) {
            MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
            jobs.add(job);
            return job;
        }

        void execute() {
            String args;
            try {
                args = objectMapper.writeValueAsString(jobs);
            } catch (JsonProcessingException e) {
                // FIX: preserve the cause instead of discarding it.
                throw new RuntimeException("Failed transform list of maintenance jobs to JSON", e);
            }

            executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
        }
    }

    /** A single maintenance job, serialized to JSON by Jackson. */
    private class MaintainerExecutorJob {
        @JsonProperty(value="type")
        private final String type;

        @JsonProperty(value="arguments")
        private final Map<String, Object> arguments = new HashMap<>();

        MaintainerExecutorJob(String type) {
            this.type = type;
        }

        MaintainerExecutorJob withArgument(String argument, Object value) {
            // Paths are stored as strings so the job serializes cleanly to JSON.
            arguments.put(argument, (value instanceof Path) ? value.toString() : value);
            return this;
        }
    }

    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
        maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler());
        return maintenanceThrottlerByContainerName.get(containerName);
    }

    /** Tracks the earliest next allowed run time for the throttled maintenance operations. */
    private class MaintenanceThrottler {
        private Instant nextRemoveOldFilesAt = Instant.EPOCH;
        private Instant nextHandleOldCoredumpsAt = Instant.EPOCH;

        void updateNextRemoveOldFilesTime() {
            nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
        }

        boolean shouldRemoveOldFilesNow() {
            return !nextRemoveOldFilesAt.isAfter(clock.instant());
        }

        void updateNextHandleCoredumpsTime() {
            nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
        }

        boolean shouldHandleCoredumpsNow() {
            return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
        }

        void reset() {
            nextRemoveOldFilesAt = Instant.EPOCH;
            nextHandleOldCoredumpsAt = Instant.EPOCH;
        }
    }
}
My understanding was that pathInNodeUnderVespaHome is used to find the path as seen from the host, but when you are inside the container you would refer to the path directly (or at least not by the same path as on the host).
/**
 * Writes the secret-agent (yamas) check configuration files for this node into the
 * container's /etc/yamas-agent/ directory, then restarts the yamas-agent service
 * inside the container so the new checks take effect.
 *
 * Fixes in this version:
 *  - the ntp check was misnamed "host-life" (colliding with the real host-life check,
 *    so one config file overwrote the other), the vespa-health check was misnamed
 *    "vespa", and the ssl check was misnamed "zkbackupage" -- each check now has a
 *    unique, descriptive name;
 *  - the routing-configage "-f" argument was missing its leading slash
 *    ("home/y/..." instead of "/home/y/...");
 *  - hard-coded "/home/y/..." paths are replaced with
 *    environment.pathInNodeUnderVespaHome(...), consistent with the rest of this class
 *    and independent of where vespa home actually is.
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> configs = new ArrayList<>();

    Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa");
    SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all");
    configs.add(annotatedCheck(node, vespaSchedule));

    Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health");
    // Was named "vespa", which duplicated the check above.
    SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all");
    configs.add(annotatedCheck(node, vespaHealthSchedule));

    Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life");
    SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath);
    configs.add(annotatedCheck(node, hostLifeSchedule));

    Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp");
    // Was named "host-life", which duplicated the check above.
    SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath);
    configs.add(annotatedCheck(node, ntpSchedule));

    Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps");
    SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300,
            coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600",
            "--crit", "1", "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString());
    configs.add(annotatedCheck(node, coredumpSchedule));

    if (node.getNodeType() == NodeType.config) {
        // Config-server specific checks: configserver health and zookeeper backup age.
        Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2");
        SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60,
                configServerCheckPath, "-zero", "configserver");
        configs.add(annotatedCheck(node, configServerSchedule));

        Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300,
                zkbackupCheckPath,
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(),
                "-m", "150", "-a", "config-zkbackupage");
        configs.add(annotatedCheck(node, zkbackupSchedule));
    }

    if (node.getNodeType() == NodeType.proxy) {
        // Proxy specific checks: routing config freshness and SSL endpoint status.
        Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60,
                routingAgeCheckPath,
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(),
                "-m", "90", "-a", "routing-configage");
        configs.add(annotatedCheck(node, routingAgeSchedule));

        Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status");
        // Was named "zkbackupage", which is a config-server check name, not an SSL check.
        SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300,
                sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30");
        configs.add(annotatedCheck(node, sslSchedule));
    }

    // Install all check definitions in the container and restart the agent to pick them up.
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder)));

    final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
"129600", "--crit", "1", "--coredir", "/home/y/var/crash/processing");
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) { List<SecretAgentCheckConfig> configs = new ArrayList<>(); Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"); SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath); configs.add(annotatedCheck(node, hostLifeSchedule)); Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"); SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath); configs.add(annotatedCheck(node, ntpSchedule)); Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"); SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1", "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString()); configs.add(annotatedCheck(node, coredumpSchedule)); if (node.getNodeType() != NodeType.config) { Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"); SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all"); configs.add(annotatedCheck(node, vespaHealthSchedule)); Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"); SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all"); configs.add(annotatedCheck(node, vespaSchedule)); } if (node.getNodeType() == NodeType.config) { Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"); SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath, "-zero", "configserver"); configs.add(annotatedCheck(node, configServerSchedule)); Path 
zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(), "-m", "150", "-a", "config-zkbackupage"); configs.add(annotatedCheck(node, zkbackupSchedule)); } if (node.getNodeType() == NodeType.proxy) { Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(), "-m", "90", "-a", "routing-configage"); configs.add(annotatedCheck(node, routingAgeSchedule)); Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"); SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30"); configs.add(annotatedCheck(node, sslSchedule)); } Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/")); configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder))); final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"}; dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent); }
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final GaugeWrapper numberOfCoredumpsOnHost; private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps"); } private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." 
+ owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; } public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(node); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode( containerName, Paths.get("/etc/filebeat/filebeat.yml")); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + node, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/")); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) { return 0; } final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new 
InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { Path[] pathsToClean = { environment.pathInNodeUnderVespaHome("logs/elasticsearch2"), environment.pathInNodeUnderVespaHome("logs/logstash2"), environment.pathInNodeUnderVespaHome("logs/daemontools_y"), environment.pathInNodeUnderVespaHome("logs/nginx"), environment.pathInNodeUnderVespaHome("logs/vespa") }; for (Path pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, 
environment.pathInNodeUnderVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) { IOExceptionUtil.uncheck(() -> numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count())); if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) { if (!environment.getCoredumpFeedEndpoint().isPresent()) { return; } Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); 
node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); 
} /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/"))) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param node Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--ips", String.join(",", node.getIpAddresses()))); node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", 
arguments.toArray(new String[0])); } private String executeMaintainer(String mainClass, String... args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final GaugeWrapper numberOfCoredumpsOnHost; private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps"); } private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." 
+ owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; } public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(node); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode( containerName, Paths.get("/etc/filebeat/filebeat.yml")); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + node, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/")); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) { return 0; } final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new 
InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { Path[] pathsToClean = { environment.pathInNodeUnderVespaHome("logs/elasticsearch2"), environment.pathInNodeUnderVespaHome("logs/logstash2"), environment.pathInNodeUnderVespaHome("logs/daemontools_y"), environment.pathInNodeUnderVespaHome("logs/nginx"), environment.pathInNodeUnderVespaHome("logs/vespa") }; for (Path pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, 
environment.pathInNodeUnderVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) { try { numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count()); } catch (IOException e) { } if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) { if (!environment.getCoredumpFeedEndpoint().isPresent()) { return; } Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); 
node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); 
} /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/"))) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param node Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--ips", String.join(",", node.getIpAddresses()))); node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", 
arguments.toArray(new String[0])); } private String executeMaintainer(String mainClass, String... args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
`pathInNodeUnderVespaHome()` returns absolute path inside the container's file system under `$VESPA_HOME`. This is no different than the `Path` you send it to `SecretAgentCheckConfig` constructor. In the end both will just be written to a file which `secret-agent` running inside the container will parse.
/**
 * Writes the secret-agent metrics check configs for the given container to
 * /etc/yamas-agent/ (as seen from node-admin) and restarts the yamas-agent service
 * inside the container so they take effect.
 *
 * Fixes over the previous version: the NTP check was registered under the name
 * "host-life", the SSL check under "zkbackupage" and the vespa-health check under
 * "vespa" (colliding with the vespa check); the routing nginx.conf path was missing
 * its leading slash; and several paths hard-coded /home/y instead of deriving them
 * from the environment's $VESPA_HOME.
 *
 * @param containerName container whose config files are written
 * @param node           node spec used to annotate every check with owner/zone tags
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> configs = new ArrayList<>();

    // Vespa metrics
    Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa");
    SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all");
    configs.add(annotatedCheck(node, vespaSchedule));

    // Vespa health (must not share the "vespa" name with the check above)
    Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health");
    SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all");
    configs.add(annotatedCheck(node, vespaHealthSchedule));

    // Host life
    Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life");
    SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath);
    configs.add(annotatedCheck(node, hostLifeSchedule));

    // NTP (was wrongly registered as "host-life")
    Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp");
    SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath);
    configs.add(annotatedCheck(node, ntpSchedule));

    // Coredump processing; derive the crash dir from $VESPA_HOME rather than hard-coding /home/y
    Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps");
    SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300,
            coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600",
            "--crit", "1", "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString());
    configs.add(annotatedCheck(node, coredumpSchedule));

    if (node.getNodeType() == NodeType.config) {
        // Config server
        Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2");
        SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60,
                configServerCheckPath, "-zero", "configserver");
        configs.add(annotatedCheck(node, configServerSchedule));

        // ZooKeeper backup age; path derived from $VESPA_HOME
        Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300,
                zkbackupCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(),
                "-m", "150", "-a", "config-zkbackupage");
        configs.add(annotatedCheck(node, zkbackupSchedule));
    }

    if (node.getNodeType() == NodeType.proxy) {
        // Routing config age; previous path "home/y/..." was relative (missing leading slash)
        Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60,
                routingAgeCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(),
                "-m", "90", "-a", "routing-configage");
        configs.add(annotatedCheck(node, routingAgeSchedule));

        // SSL endpoint status (was wrongly registered as "zkbackupage")
        Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status");
        SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300,
                sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30");
        configs.add(annotatedCheck(node, sslSchedule));
    }

    // Write the config files (yamas-agent will read them inside the container) and restart the agent
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder)));
    final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
"129600", "--crit", "1", "--coredir", "/home/y/var/crash/processing");
/**
 * Writes the secret-agent metrics check configs for the given container to
 * /etc/yamas-agent/ (as seen from node-admin) and restarts the yamas-agent service
 * inside the container so they take effect. All check script and data paths are
 * derived from the environment's $VESPA_HOME.
 *
 * @param containerName container whose config files are written
 * @param node           node spec used to annotate every check with owner/zone tags
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> configs = new ArrayList<>();

    // Host life check (all node types)
    Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life");
    SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath);
    configs.add(annotatedCheck(node, hostLifeSchedule));

    // NTP check (all node types)
    Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp");
    SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath);
    configs.add(annotatedCheck(node, ntpSchedule));

    // Coredump processing check (all node types); crash dir under $VESPA_HOME
    Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps");
    SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300,
            coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600",
            "--crit", "1", "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString());
    configs.add(annotatedCheck(node, coredumpSchedule));

    // Vespa checks only for non-config nodes
    if (node.getNodeType() != NodeType.config) {
        Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health");
        SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all");
        configs.add(annotatedCheck(node, vespaHealthSchedule));

        Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa");
        SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all");
        configs.add(annotatedCheck(node, vespaSchedule));
    }

    // Config-server-only checks
    if (node.getNodeType() == NodeType.config) {
        Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2");
        SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60,
                configServerCheckPath, "-zero", "configserver");
        configs.add(annotatedCheck(node, configServerSchedule));

        // ZooKeeper backup age: alert if zkbackup.stat is older than 150 minutes
        Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300,
                zkbackupCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(),
                "-m", "150", "-a", "config-zkbackupage");
        configs.add(annotatedCheck(node, zkbackupSchedule));
    }

    // Proxy-only checks
    if (node.getNodeType() == NodeType.proxy) {
        // Routing config age: alert if nginx.conf is older than 90 minutes
        Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60,
                routingAgeCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(),
                "-m", "90", "-a", "routing-configage");
        configs.add(annotatedCheck(node, routingAgeSchedule));

        // SSL endpoint status on localhost:4443 with 30s timeout
        Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status");
        SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300,
                sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30");
        configs.add(annotatedCheck(node, sslSchedule));
    }

    // Write the config files (yamas-agent inside the container reads them) and restart the agent
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder)));
    final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
/**
 * Performs storage maintenance for Docker containers managed by node-admin:
 * cleaning up old log/app files, reporting and archiving coredumps, writing
 * filebeat config, measuring disk usage, and verifying hardware specs.
 * Long-running work is delegated to a separate maintenance JVM run as root.
 */
class StorageMaintainer {
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    private static final ObjectMapper objectMapper = new ObjectMapper();
    private final GaugeWrapper numberOfCoredumpsOnHost;
    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    // Per-container throttle state; concurrent map since agents for different containers may call in parallel
    private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
                             MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
        numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps");
    }

    /** Decorates the given check with standard tags (zone, flavor, owner, cluster, version) from the node spec. */
    private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) {
        check.withTag("namespace", "Vespa")
                .withTag("role", "tenants")
                .withTag("flavor", node.getFlavor())
                .withTag("canonicalFlavor", node.getCanonicalFlavor())
                .withTag("state", node.getState().toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());
        node.getOwner().ifPresent(owner -> check
                .withTag("tenantName", owner.getTenant())
                .withTag("app", owner.getApplication() + "." + owner.getInstance())
                .withTag("applicationName", owner.getApplication())
                .withTag("instanceName", owner.getInstance())
                .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance()));
        node.getMembership().ifPresent(membership -> check
                .withTag("clustertype", membership.getClusterType())
                .withTag("clusterid", membership.getClusterId()));
        node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version));
        return check;
    }

    /**
     * Generates and writes /etc/filebeat/filebeat.yml for the container.
     * Best-effort: any failure is logged and swallowed so the agent loop continues.
     */
    public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(node);
            if (!config.isPresent()) {
                logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString());
                return;
            }
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/filebeat/filebeat.yml"));
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } catch (Throwable t) {
            logger.error("Failed writing filebeat config; " + node, t);
        }
    }

    /**
     * Returns the container's disk usage in bytes under its /home, or empty if the
     * measurement fails (the error is logged).
     */
    public Optional<Long> getDiskUsageFor(ContainerName containerName) {
        Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/"));
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
            logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Measures disk usage of the given path in bytes by shelling out to `du -xsk`
     * (KiB, one file system, summarized), with a 60 second timeout.
     *
     * @return 0 if the path does not exist, otherwise bytes used
     * @throws RuntimeException on timeout or unparseable du output
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        if (!Files.exists(path)) {
            return 0;
        }
        final String[] command = {"du", "-xsk", path.toString()};
        Process duCommand = new ProcessBuilder().command(command).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }
        // du output is "<kilobytes>\t<path>"
        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }
        long diskUsageKB = Long.valueOf(results[0]);
        return diskUsageKB * 1024;
    }

    /**
     * Deletes old log files for vespa, nginx, logstash, etc.
     * Throttled to at most once per hour per container.
     */
    public void removeOldFilesFromNode(ContainerName containerName) {
        if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
    }

    /** Queues delete-files jobs for rotated logs (3 days), log archive and filedistribution (31 days). */
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        Path[] pathsToClean = {
                environment.pathInNodeUnderVespaHome("logs/elasticsearch2"),
                environment.pathInNodeUnderVespaHome("logs/logstash2"),
                environment.pathInNodeUnderVespaHome("logs/daemontools_y"),
                environment.pathInNodeUnderVespaHome("logs/nginx"),
                environment.pathInNodeUnderVespaHome("logs/vespa") };

        for (Path pathToClean : pathsToClean) {
            Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
            if (Files.exists(path)) {
                maintainerExecutor.addJob("delete-files")
                        .withArgument("basePath", path)
                        .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                        .withArgument("fileNameRegex", ".*\\.log.+")  // only rotated logs, not the live .log file
                        .withArgument("recursive", false);
            }
        }

        Path qrsDir = environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", qrsDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                .withArgument("recursive", false);

        Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", logArchiveDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
    }

    /**
     * Checks if container has any new coredumps, reports and archives them if so
     *
     * @param force Set to true to bypass throttling
     */
    public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) {
        // Sample the current number of processed coredumps on the host as a gauge metric.
        // uncheck() rethrows any IOException from Files.list as unchecked.
        IOExceptionUtil.uncheck(() ->
                numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count()));
        if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
    }

    /**
     * Queues a handle-core-dumps job with metadata attributes for the coredump feed.
     * No-op when no coredump feed endpoint is configured for this environment.
     */
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) {
        if (!environment.getCoredumpFeedEndpoint().isPresent()) {
            // Core dump handling is disabled.
            return;
        }

        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", node.getHostname());
        attributes.put("parent_hostname", environment.getParentHostHostname());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", node.getFlavor());
        attributes.put("kernel_version", System.getProperty("os.version"));

        node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString()));
        node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version));
        node.getOwner().ifPresent(owner -> {
            attributes.put("tenant", owner.getTenant());
            attributes.put("application", owner.getApplication());
            attributes.put("instance", owner.getInstance());
        });

        maintainerExecutor.addJob("handle-core-dumps")
                .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
                .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("var/crash")))
                .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get())
                .withArgument("attributes", attributes);
    }

    /**
     * Deletes old
     *  * archived app data
     *  * Vespa logs
     *  * Filedistribution files
     * for the node-admin container itself. Throttled like per-container cleanup.
     */
    public void cleanNodeAdmin() {
        if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        maintainerExecutor.addJob("delete-directories")
                .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
                .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
                .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));

        Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", nodeAdminJDiskLogsPath)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
     * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty"
     */
    public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) {
        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        addArchiveNodeData(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        // Re-arm the throttler so the next container on this name starts fresh
        getMaintenanceThrottlerFor(containerName).reset();
    }

    /** Queues jobs that delete the container's var/ data and move the rest of its filesystem to the cleanup area. */
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        maintainerExecutor.addJob("recursive-delete")
                .withArgument("path", environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("var")));

        maintainerExecutor.addJob("move-files")
                .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/")))
                .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    }

    /**
     * Runs node-maintainer's SpecVerifier and returns its output
     * @param node Node specification containing the excepted values we want to verify against
     * @return new combined hardware divergence
     * @throws RuntimeException if exit code != 0
     */
    public String getHardwareDivergence(NodeSpec node) {
        List<String> arguments = new ArrayList<>(Arrays.asList("specification",
                "--disk", Double.toString(node.getMinDiskAvailableGb()),
                "--memory", Double.toString(node.getMinMainMemoryAvailableGb()),
                "--cpu_cores", Double.toString(node.getMinCpuCores()),
                "--is_ssd", Boolean.toString(node.isFastDisk()),
                "--ips", String.join(",", node.getIpAddresses())));

        node.getHardwareDivergence().ifPresent(hardwareDivergence -> {
            // Feed the previously stored divergence back in so it can be combined with new findings
            arguments.add("--divergence");
            arguments.add(hardwareDivergence);
        });

        return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0]));
    }

    /**
     * Runs the given main class via the node-admin maintenance.sh wrapper as root
     * and returns its trimmed stdout/stderr.
     *
     * @throws RuntimeException on non-zero exit (failure counter is bumped) or launch failure
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo",
                        "VESPA_HOME=" + getDefaults().vespaHome(),
                        getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"),
                        mainClass),
                Stream.of(args))
                .toArray(String[]::new);

        try {
            Pair<Integer, String> result = processExecuter.exec(command);
            if (result.getFirst() != 0) {
                numberOfNodeAdminMaintenanceFails.add();
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }

    /**
     * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
     */
    private class MaintainerExecutor {
        private final List<MaintainerExecutorJob> jobs = new ArrayList<>();

        MaintainerExecutorJob addJob(String jobName) {
            MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
            jobs.add(job);
            return job;
        }

        /** Serializes all queued jobs to JSON and sends them to the maintenance JVM in one invocation. */
        void execute() {
            String args;
            try {
                args = objectMapper.writeValueAsString(jobs);
            } catch (JsonProcessingException e) {
                throw new RuntimeException("Failed transform list of maintenance jobs to JSON");
            }

            executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
        }
    }

    /** A single maintenance job: a type and a free-form argument map, serialized to JSON by Jackson. */
    private class MaintainerExecutorJob {
        @JsonProperty(value="type")
        private final String type;

        @JsonProperty(value="arguments")
        private final Map<String, Object> arguments = new HashMap<>();

        MaintainerExecutorJob(String type) {
            this.type = type;
        }

        MaintainerExecutorJob withArgument(String argument, Object value) {
            // Paths are serialized as strings to keep the JSON portable
            arguments.put(argument, (value instanceof Path) ? value.toString() : value);
            return this;
        }
    }

    /** Returns the throttler for the container, creating it on first use. */
    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
        maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler());
        return maintenanceThrottlerByContainerName.get(containerName);
    }

    /** Tracks earliest next-run instants for file cleanup and coredump handling; EPOCH means "run now". */
    private class MaintenanceThrottler {
        private Instant nextRemoveOldFilesAt = Instant.EPOCH;
        private Instant nextHandleOldCoredumpsAt = Instant.EPOCH;

        void updateNextRemoveOldFilesTime() {
            nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
        }

        boolean shouldRemoveOldFilesNow() {
            return !nextRemoveOldFilesAt.isAfter(clock.instant());
        }

        void updateNextHandleCoredumpsTime() {
            nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
        }

        boolean shouldHandleCoredumpsNow() {
            return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
        }

        void reset() {
            nextRemoveOldFilesAt = Instant.EPOCH;
            nextHandleOldCoredumpsAt = Instant.EPOCH;
        }
    }
}
/**
 * Performs storage maintenance for Docker containers managed by node-admin:
 * cleaning up old log/app files, reporting and archiving coredumps, writing
 * filebeat config, measuring disk usage, and verifying hardware specs.
 * Long-running work is delegated to a separate maintenance JVM run as root.
 */
class StorageMaintainer {
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    private static final ObjectMapper objectMapper = new ObjectMapper();
    private final GaugeWrapper numberOfCoredumpsOnHost;
    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    // Per-container throttle state; concurrent map since agents for different containers may call in parallel
    private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
                             MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
        numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps");
    }

    /** Decorates the given check with standard tags (zone, flavor, owner, cluster, version) from the node spec. */
    private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) {
        check.withTag("namespace", "Vespa")
                .withTag("role", "tenants")
                .withTag("flavor", node.getFlavor())
                .withTag("canonicalFlavor", node.getCanonicalFlavor())
                .withTag("state", node.getState().toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());
        node.getOwner().ifPresent(owner -> check
                .withTag("tenantName", owner.getTenant())
                .withTag("app", owner.getApplication() + "." + owner.getInstance())
                .withTag("applicationName", owner.getApplication())
                .withTag("instanceName", owner.getInstance())
                .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance()));
        node.getMembership().ifPresent(membership -> check
                .withTag("clustertype", membership.getClusterType())
                .withTag("clusterid", membership.getClusterId()));
        node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version));
        return check;
    }

    /**
     * Generates and writes /etc/filebeat/filebeat.yml for the container.
     * Best-effort: any failure is logged and swallowed so the agent loop continues.
     */
    public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(node);
            if (!config.isPresent()) {
                logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString());
                return;
            }
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/filebeat/filebeat.yml"));
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } catch (Throwable t) {
            logger.error("Failed writing filebeat config; " + node, t);
        }
    }

    /**
     * Returns the container's disk usage in bytes under its /home, or empty if the
     * measurement fails (the error is logged).
     */
    public Optional<Long> getDiskUsageFor(ContainerName containerName) {
        Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/"));
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
            logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Measures disk usage of the given path in bytes by shelling out to `du -xsk`
     * (KiB, one file system, summarized), with a 60 second timeout.
     *
     * @return 0 if the path does not exist, otherwise bytes used
     * @throws RuntimeException on timeout or unparseable du output
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        if (!Files.exists(path)) {
            return 0;
        }
        final String[] command = {"du", "-xsk", path.toString()};
        Process duCommand = new ProcessBuilder().command(command).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }
        // du output is "<kilobytes>\t<path>"
        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }
        long diskUsageKB = Long.valueOf(results[0]);
        return diskUsageKB * 1024;
    }

    /**
     * Deletes old log files for vespa, nginx, logstash, etc.
     * Throttled to at most once per hour per container.
     */
    public void removeOldFilesFromNode(ContainerName containerName) {
        if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
    }

    /** Queues delete-files jobs for rotated logs (3 days), log archive and filedistribution (31 days). */
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        Path[] pathsToClean = {
                environment.pathInNodeUnderVespaHome("logs/elasticsearch2"),
                environment.pathInNodeUnderVespaHome("logs/logstash2"),
                environment.pathInNodeUnderVespaHome("logs/daemontools_y"),
                environment.pathInNodeUnderVespaHome("logs/nginx"),
                environment.pathInNodeUnderVespaHome("logs/vespa") };

        for (Path pathToClean : pathsToClean) {
            Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
            if (Files.exists(path)) {
                maintainerExecutor.addJob("delete-files")
                        .withArgument("basePath", path)
                        .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                        .withArgument("fileNameRegex", ".*\\.log.+")  // only rotated logs, not the live .log file
                        .withArgument("recursive", false);
            }
        }

        Path qrsDir = environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", qrsDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                .withArgument("recursive", false);

        Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", logArchiveDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
    }

    /**
     * Checks if container has any new coredumps, reports and archives them if so
     *
     * @param force Set to true to bypass throttling
     */
    public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) {
        try {
            // Sample the number of processed coredumps on the host as a gauge metric.
            // Best-effort: a failure to list the directory is deliberately ignored so
            // metric sampling never blocks coredump handling.
            numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count());
        } catch (IOException e) { }
        if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
    }

    /**
     * Queues a handle-core-dumps job with metadata attributes for the coredump feed.
     * No-op when no coredump feed endpoint is configured for this environment.
     */
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) {
        if (!environment.getCoredumpFeedEndpoint().isPresent()) {
            // Core dump handling is disabled.
            return;
        }

        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", node.getHostname());
        attributes.put("parent_hostname", environment.getParentHostHostname());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", node.getFlavor());
        attributes.put("kernel_version", System.getProperty("os.version"));

        node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString()));
        node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version));
        node.getOwner().ifPresent(owner -> {
            attributes.put("tenant", owner.getTenant());
            attributes.put("application", owner.getApplication());
            attributes.put("instance", owner.getInstance());
        });

        maintainerExecutor.addJob("handle-core-dumps")
                .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
                .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("var/crash")))
                .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get())
                .withArgument("attributes", attributes);
    }

    /**
     * Deletes old
     *  * archived app data
     *  * Vespa logs
     *  * Filedistribution files
     * for the node-admin container itself. Throttled like per-container cleanup.
     */
    public void cleanNodeAdmin() {
        if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        maintainerExecutor.addJob("delete-directories")
                .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
                .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
                .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));

        Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", nodeAdminJDiskLogsPath)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
     * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty"
     */
    public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) {
        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        addArchiveNodeData(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        // Re-arm the throttler so the next container on this name starts fresh
        getMaintenanceThrottlerFor(containerName).reset();
    }

    /** Queues jobs that delete the container's var/ data and move the rest of its filesystem to the cleanup area. */
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        maintainerExecutor.addJob("recursive-delete")
                .withArgument("path", environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("var")));

        maintainerExecutor.addJob("move-files")
                .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/")))
                .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    }

    /**
     * Runs node-maintainer's SpecVerifier and returns its output
     * @param node Node specification containing the excepted values we want to verify against
     * @return new combined hardware divergence
     * @throws RuntimeException if exit code != 0
     */
    public String getHardwareDivergence(NodeSpec node) {
        List<String> arguments = new ArrayList<>(Arrays.asList("specification",
                "--disk", Double.toString(node.getMinDiskAvailableGb()),
                "--memory", Double.toString(node.getMinMainMemoryAvailableGb()),
                "--cpu_cores", Double.toString(node.getMinCpuCores()),
                "--is_ssd", Boolean.toString(node.isFastDisk()),
                "--ips", String.join(",", node.getIpAddresses())));

        node.getHardwareDivergence().ifPresent(hardwareDivergence -> {
            // Feed the previously stored divergence back in so it can be combined with new findings
            arguments.add("--divergence");
            arguments.add(hardwareDivergence);
        });

        return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0]));
    }

    /**
     * Runs the given main class via the node-admin maintenance.sh wrapper as root
     * and returns its trimmed stdout/stderr.
     *
     * @throws RuntimeException on non-zero exit (failure counter is bumped) or launch failure
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo",
                        "VESPA_HOME=" + getDefaults().vespaHome(),
                        getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"),
                        mainClass),
                Stream.of(args))
                .toArray(String[]::new);

        try {
            Pair<Integer, String> result = processExecuter.exec(command);
            if (result.getFirst() != 0) {
                numberOfNodeAdminMaintenanceFails.add();
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }

    /**
     * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
     */
    private class MaintainerExecutor {
        private final List<MaintainerExecutorJob> jobs = new ArrayList<>();

        MaintainerExecutorJob addJob(String jobName) {
            MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
            jobs.add(job);
            return job;
        }

        /** Serializes all queued jobs to JSON and sends them to the maintenance JVM in one invocation. */
        void execute() {
            String args;
            try {
                args = objectMapper.writeValueAsString(jobs);
            } catch (JsonProcessingException e) {
                throw new RuntimeException("Failed transform list of maintenance jobs to JSON");
            }

            executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
        }
    }

    /** A single maintenance job: a type and a free-form argument map, serialized to JSON by Jackson. */
    private class MaintainerExecutorJob {
        @JsonProperty(value="type")
        private final String type;

        @JsonProperty(value="arguments")
        private final Map<String, Object> arguments = new HashMap<>();

        MaintainerExecutorJob(String type) {
            this.type = type;
        }

        MaintainerExecutorJob withArgument(String argument, Object value) {
            // Paths are serialized as strings to keep the JSON portable
            arguments.put(argument, (value instanceof Path) ? value.toString() : value);
            return this;
        }
    }

    /** Returns the throttler for the container, creating it on first use. */
    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
        maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler());
        return maintenanceThrottlerByContainerName.get(containerName);
    }

    /** Tracks earliest next-run instants for file cleanup and coredump handling; EPOCH means "run now". */
    private class MaintenanceThrottler {
        private Instant nextRemoveOldFilesAt = Instant.EPOCH;
        private Instant nextHandleOldCoredumpsAt = Instant.EPOCH;

        void updateNextRemoveOldFilesTime() {
            nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
        }

        boolean shouldRemoveOldFilesNow() {
            return !nextRemoveOldFilesAt.isAfter(clock.instant());
        }

        void updateNextHandleCoredumpsTime() {
            nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
        }

        boolean shouldHandleCoredumpsNow() {
            return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
        }

        void reset() {
            nextRemoveOldFilesAt = Instant.EPOCH;
            nextHandleOldCoredumpsAt = Instant.EPOCH;
        }
    }
}
Right, I was confused by the pathIn methods. I now see the pathInNodeAdminFromPathInNode used as the pases for the actual file. Will fix
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) { List<SecretAgentCheckConfig> configs = new ArrayList<>(); Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"); SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all"); configs.add(annotatedCheck(node, vespaSchedule)); Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"); SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa", 60, vespaHealthCheckPath, "all"); configs.add(annotatedCheck(node, vespaHealthSchedule)); Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"); SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath); configs.add(annotatedCheck(node, hostLifeSchedule)); Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"); SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("host-life", 60, ntpCheckPath); configs.add(annotatedCheck(node, ntpSchedule)); Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"); SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1", "--coredir", "/home/y/var/crash/processing"); configs.add(annotatedCheck(node, coredumpSchedule)); if (node.getNodeType() == NodeType.config) { Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"); SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath, "-zero", "configserver"); configs.add(annotatedCheck(node, configServerSchedule)); Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); 
SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath, "-f", "/home/y/var/vespa-hosted/zkbackup.stat", "-m", "150", "-a", "config-zkbackupage"); configs.add(annotatedCheck(node, zkbackupSchedule)); } if (node.getNodeType() == NodeType.proxy) { Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath, "-f", "home/y/var/vespa-hosted/routing/nginx.conf", "-m", "90", "-a", "routing-configage"); configs.add(annotatedCheck(node, routingAgeSchedule)); Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"); SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("zkbackupage", 300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30"); configs.add(annotatedCheck(node, sslSchedule)); } Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/")); configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder))); final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"}; dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent); }
"129600", "--crit", "1", "--coredir", "/home/y/var/crash/processing");
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) { List<SecretAgentCheckConfig> configs = new ArrayList<>(); Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"); SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath); configs.add(annotatedCheck(node, hostLifeSchedule)); Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"); SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath); configs.add(annotatedCheck(node, ntpSchedule)); Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"); SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1", "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString()); configs.add(annotatedCheck(node, coredumpSchedule)); if (node.getNodeType() != NodeType.config) { Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"); SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all"); configs.add(annotatedCheck(node, vespaHealthSchedule)); Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"); SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all"); configs.add(annotatedCheck(node, vespaSchedule)); } if (node.getNodeType() == NodeType.config) { Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"); SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath, "-zero", "configserver"); configs.add(annotatedCheck(node, configServerSchedule)); Path 
zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(), "-m", "150", "-a", "config-zkbackupage"); configs.add(annotatedCheck(node, zkbackupSchedule)); } if (node.getNodeType() == NodeType.proxy) { Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(), "-m", "90", "-a", "routing-configage"); configs.add(annotatedCheck(node, routingAgeSchedule)); Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"); SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30"); configs.add(annotatedCheck(node, sslSchedule)); } Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/")); configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder))); final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"}; dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent); }
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final GaugeWrapper numberOfCoredumpsOnHost; private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps"); } private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." 
+ owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; } public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(node); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode( containerName, Paths.get("/etc/filebeat/filebeat.yml")); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + node, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/")); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) { return 0; } final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new 
InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { Path[] pathsToClean = { environment.pathInNodeUnderVespaHome("logs/elasticsearch2"), environment.pathInNodeUnderVespaHome("logs/logstash2"), environment.pathInNodeUnderVespaHome("logs/daemontools_y"), environment.pathInNodeUnderVespaHome("logs/nginx"), environment.pathInNodeUnderVespaHome("logs/vespa") }; for (Path pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, 
environment.pathInNodeUnderVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) { IOExceptionUtil.uncheck(() -> numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count())); if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) { if (!environment.getCoredumpFeedEndpoint().isPresent()) { return; } Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); 
node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); 
} /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/"))) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param node Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--ips", String.join(",", node.getIpAddresses()))); node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", 
arguments.toArray(new String[0])); } private String executeMaintainer(String mainClass, String... args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
/**
 * Performs storage maintenance for containers managed by node-admin: writes config files,
 * measures disk usage, removes old logs, reports/archives coredumps and archives container
 * data via jobs sent to the node-admin maintenance JVM.
 */
class StorageMaintainer {
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    private static final ObjectMapper objectMapper = new ObjectMapper();

    private final GaugeWrapper numberOfCoredumpsOnHost;
    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    // Per-container throttling state; concurrent map since node agents run on separate threads.
    private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
                             MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(
                MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
        numberOfCoredumpsOnHost = metricReceiver.declareGauge(
                MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps");
    }

    /** Adds the standard tag set (flavor, zone, owner, membership, version) to a secret-agent check. */
    private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) {
        check.withTag("namespace", "Vespa")
                .withTag("role", "tenants")
                .withTag("flavor", node.getFlavor())
                .withTag("canonicalFlavor", node.getCanonicalFlavor())
                .withTag("state", node.getState().toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());
        node.getOwner().ifPresent(owner -> check
                .withTag("tenantName", owner.getTenant())
                .withTag("app", owner.getApplication() + "." + owner.getInstance())
                .withTag("applicationName", owner.getApplication())
                .withTag("instanceName", owner.getInstance())
                .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance()));
        node.getMembership().ifPresent(membership -> check
                .withTag("clustertype", membership.getClusterType())
                .withTag("clusterid", membership.getClusterId()));
        node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version));
        return check;
    }

    /** Generates and writes the filebeat config for the node; best-effort, failures are logged only. */
    public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(node);
            if (!config.isPresent()) {
                logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString());
                return;
            }
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(
                    containerName, Paths.get("/etc/filebeat/filebeat.yml"));
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } catch (Throwable t) {
            // Config writing must never take down the agent loop.
            logger.error("Failed writing filebeat config; " + node, t);
        }
    }

    /** @return disk usage in bytes of the container's /home, or empty if the calculation failed */
    public Optional<Long> getDiskUsageFor(ContainerName containerName) {
        Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/"));
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
            logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Runs {@code du -xsk} on the given path and converts the result to bytes.
     *
     * @return 0 if the path does not exist, otherwise the used bytes
     * @throws RuntimeException if du times out (60 s) or produces unexpected output
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        if (!Files.exists(path)) {
            return 0;
        }

        final String[] command = {"du", "-xsk", path.toString()};

        Process duCommand = new ProcessBuilder().command(command).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }
        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }

        // du -k reports kibibytes; parseLong avoids the needless boxing of Long.valueOf.
        long diskUsageKB = Long.parseLong(results[0]);
        return diskUsageKB * 1024;
    }

    /** Deletes old log files for vespa, nginx, logstash, etc. Throttled to at most once per hour. */
    public void removeOldFilesFromNode(ContainerName containerName) {
        if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
    }

    /** Queues delete-files jobs for the container's log and filedistribution directories. */
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        Path[] pathsToClean = {
                environment.pathInNodeUnderVespaHome("logs/elasticsearch2"),
                environment.pathInNodeUnderVespaHome("logs/logstash2"),
                environment.pathInNodeUnderVespaHome("logs/daemontools_y"),
                environment.pathInNodeUnderVespaHome("logs/nginx"),
                environment.pathInNodeUnderVespaHome("logs/vespa")
        };

        for (Path pathToClean : pathsToClean) {
            Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
            if (Files.exists(path)) {
                maintainerExecutor.addJob("delete-files")
                        .withArgument("basePath", path)
                        .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                        .withArgument("fileNameRegex", ".*\\.log.+")
                        .withArgument("recursive", false);
            }
        }

        Path qrsDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", qrsDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                .withArgument("recursive", false);

        Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", logArchiveDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
    }

    /**
     * Checks if container has any new coredumps, reports and archives them if so
     *
     * @param force Set to true to bypass throttling
     */
    public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) {
        // Sample the number of processed coredumps on the host. Best effort: sampling failures are
        // deliberately ignored so they never block coredump handling. try-with-resources closes the
        // directory stream returned by Files.list (the original leaked it).
        try (Stream<Path> doneCoredumps = Files.list(environment.pathInNodeAdminToDoneCoredumps())) {
            numberOfCoredumpsOnHost.sample(doneCoredumps.count());
        } catch (IOException ignored) {
            // Directory may not exist yet; the metric is merely skipped this round.
        }

        if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
    }

    /** Queues a handle-core-dumps job; no-op unless a coredump feed endpoint is configured. */
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) {
        if (!environment.getCoredumpFeedEndpoint().isPresent()) {
            // No endpoint configured — coredump reporting is disabled in this environment.
            return;
        }

        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", node.getHostname());
        attributes.put("parent_hostname", environment.getParentHostHostname());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", node.getFlavor());
        attributes.put("kernel_version", System.getProperty("os.version"));

        node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString()));
        node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version));
        node.getOwner().ifPresent(owner -> {
            attributes.put("tenant", owner.getTenant());
            attributes.put("application", owner.getApplication());
            attributes.put("instance", owner.getInstance());
        });

        maintainerExecutor.addJob("handle-core-dumps")
                .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
                .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var/crash")))
                .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get())
                .withArgument("attributes", attributes);
    }

    /**
     * Deletes old
     *  * archived app data
     *  * Vespa logs
     *  * Filedistribution files
     */
    public void cleanNodeAdmin() {
        if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        maintainerExecutor.addJob("delete-directories")
                .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
                .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
                .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));

        Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", nodeAdminJDiskLogsPath)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
     * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty"
     */
    public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) {
        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        addArchiveNodeData(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        // Allow the next container's maintenance to run immediately.
        getMaintenanceThrottlerFor(containerName).reset();
    }

    /** Queues jobs that delete var/ and move the remaining container data to the cleanup area. */
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        maintainerExecutor.addJob("recursive-delete")
                .withArgument("path", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var")));

        maintainerExecutor.addJob("move-files")
                .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/")))
                .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    }

    /**
     * Runs node-maintainer's SpecVerifier and returns its output
     * @param node Node specification containing the excepted values we want to verify against
     * @return new combined hardware divergence
     * @throws RuntimeException if exit code != 0
     */
    public String getHardwareDivergence(NodeSpec node) {
        List<String> arguments = new ArrayList<>(Arrays.asList("specification",
                "--disk", Double.toString(node.getMinDiskAvailableGb()),
                "--memory", Double.toString(node.getMinMainMemoryAvailableGb()),
                "--cpu_cores", Double.toString(node.getMinCpuCores()),
                "--is_ssd", Boolean.toString(node.isFastDisk()),
                "--ips", String.join(",", node.getIpAddresses())));

        node.getHardwareDivergence().ifPresent(hardwareDivergence -> {
            arguments.add("--divergence");
            arguments.add(hardwareDivergence);
        });

        return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0]));
    }

    /**
     * Runs the node-admin maintenance.sh script under sudo with the given main class and arguments.
     *
     * @return trimmed stdout/stderr of the command
     * @throws RuntimeException if the command exits non-zero or cannot be executed
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo",
                        "VESPA_HOME=" + getDefaults().vespaHome(),
                        getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"),
                        mainClass),
                Stream.of(args))
                .toArray(String[]::new);

        try {
            Pair<Integer, String> result = processExecuter.exec(command);
            if (result.getFirst() != 0) {
                // Count the failure before throwing so the metric reflects every failed run.
                numberOfNodeAdminMaintenanceFails.add();
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }

    /**
     * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
     */
    private class MaintainerExecutor {
        private final List<MaintainerExecutorJob> jobs = new ArrayList<>();

        /** Queues a job of the given type; arguments are added via the returned job's withArgument(). */
        MaintainerExecutorJob addJob(String jobName) {
            MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
            jobs.add(job);
            return job;
        }

        /** Serializes all queued jobs to JSON and hands them to the maintenance JVM in one invocation. */
        void execute() {
            String args;
            try {
                args = objectMapper.writeValueAsString(jobs);
            } catch (JsonProcessingException e) {
                // Preserve the cause so serialization failures are debuggable (original dropped it).
                throw new RuntimeException("Failed transform list of maintenance jobs to JSON", e);
            }

            executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
        }
    }

    /** A single maintenance job; serialized by Jackson as {"type": ..., "arguments": {...}}. */
    private class MaintainerExecutorJob {
        @JsonProperty(value="type")
        private final String type;

        @JsonProperty(value="arguments")
        private final Map<String, Object> arguments = new HashMap<>();

        MaintainerExecutorJob(String type) {
            this.type = type;
        }

        /** Adds a named argument; Path values are serialized as plain strings. */
        MaintainerExecutorJob withArgument(String argument, Object value) {
            arguments.put(argument, (value instanceof Path) ? value.toString() : value);
            return this;
        }
    }

    /** Returns (creating on first use) the throttler for the given container. */
    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
        maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler());
        return maintenanceThrottlerByContainerName.get(containerName);
    }

    /** Tracks the earliest time each kind of maintenance may run again for one container. */
    private class MaintenanceThrottler {
        private Instant nextRemoveOldFilesAt = Instant.EPOCH;
        private Instant nextHandleOldCoredumpsAt = Instant.EPOCH;

        // Old-file removal is throttled to once per hour.
        void updateNextRemoveOldFilesTime() {
            nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
        }

        boolean shouldRemoveOldFilesNow() {
            return !nextRemoveOldFilesAt.isAfter(clock.instant());
        }

        // Coredump handling is throttled to once per 5 minutes.
        void updateNextHandleCoredumpsTime() {
            nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
        }

        boolean shouldHandleCoredumpsNow() {
            return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
        }

        // Resets throttling (e.g. when the container is recycled) so maintenance may run immediately.
        void reset() {
            nextRemoveOldFilesAt = Instant.EPOCH;
            nextHandleOldCoredumpsAt = Instant.EPOCH;
        }
    }
}
`vespa-health`
/**
 * Writes secret-agent metrics check configs for the node into /etc/yamas-agent and restarts the
 * yamas agent to pick them up. The set of checks written depends on the node type.
 *
 * Fixes two wrong check ids: the vespa-health check was registered as "vespa" (colliding with the
 * separate vespa check written right after it), and the SSL check was misnamed "ssh-status".
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> configs = new ArrayList<>();

    // host-life
    Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life");
    SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath);
    configs.add(annotatedCheck(node, hostLifeSchedule));

    // ntp
    Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp");
    SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath);
    configs.add(annotatedCheck(node, ntpSchedule));

    // coredumps
    Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps");
    SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300,
            coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600",
            "--crit", "1", "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString());
    configs.add(annotatedCheck(node, coredumpSchedule));

    if (node.getNodeType() != NodeType.config) {
        // vespa-health (FIX: was "vespa", which collided with the vespa check below)
        Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health");
        SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all");
        configs.add(annotatedCheck(node, vespaHealthSchedule));

        // vespa
        Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa");
        SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all");
        configs.add(annotatedCheck(node, vespaSchedule));
    }

    if (node.getNodeType() == NodeType.config) {
        // configserver
        Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2");
        SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60,
                configServerCheckPath, "-zero", "configserver");
        configs.add(annotatedCheck(node, configServerSchedule));

        // zkbackupage
        Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300,
                zkbackupCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(),
                "-m", "150", "-a", "config-zkbackupage");
        configs.add(annotatedCheck(node, zkbackupSchedule));
    }

    if (node.getNodeType() == NodeType.proxy) {
        // routing-configage
        Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60,
                routingAgeCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(),
                "-m", "90", "-a", "routing-configage");
        configs.add(annotatedCheck(node, routingAgeSchedule));

        // ssl-status (FIX: was misnamed "ssh-status" — this check probes the SSL endpoint on 4443)
        Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status");
        SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300,
                sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30");
        configs.add(annotatedCheck(node, sslSchedule));
    }

    // Write all checks to the yamas-agent folder and restart the agent to load them.
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder)));

    final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa", 60, vespaHealthCheckPath, "all");
/**
 * Writes secret-agent metrics check configs for the node into /etc/yamas-agent and restarts the
 * yamas agent to pick them up. The set of checks written depends on the node type: tenant nodes
 * get vespa checks, config servers get configserver/zkbackup checks, proxies get routing/ssl checks.
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> configs = new ArrayList<>();

    // host-life
    Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life");
    SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath);
    configs.add(annotatedCheck(node, hostLifeSchedule));

    // ntp
    Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp");
    SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath);
    configs.add(annotatedCheck(node, ntpSchedule));

    // coredumps
    Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps");
    SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300,
            coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600",
            "--crit", "1", "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString());
    configs.add(annotatedCheck(node, coredumpSchedule));

    if (node.getNodeType() != NodeType.config) {
        // vespa-health
        Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health");
        SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all");
        configs.add(annotatedCheck(node, vespaHealthSchedule));

        // vespa
        Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa");
        SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all");
        configs.add(annotatedCheck(node, vespaSchedule));
    }

    if (node.getNodeType() == NodeType.config) {
        // configserver
        Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2");
        SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60,
                configServerCheckPath, "-zero", "configserver");
        configs.add(annotatedCheck(node, configServerSchedule));

        // zkbackupage — alerts when the ZooKeeper backup stat file grows too old
        Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300,
                zkbackupCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(),
                "-m", "150", "-a", "config-zkbackupage");
        configs.add(annotatedCheck(node, zkbackupSchedule));
    }

    if (node.getNodeType() == NodeType.proxy) {
        // routing-configage — alerts when the routing nginx config grows too old
        Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60,
                routingAgeCheckPath, "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(),
                "-m", "90", "-a", "routing-configage");
        configs.add(annotatedCheck(node, routingAgeSchedule));

        // ssl-status — probes the SSL endpoint on localhost:4443
        Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status");
        SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300,
                sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30");
        configs.add(annotatedCheck(node, sslSchedule));
    }

    // Write all checks to the yamas-agent folder and restart the agent so it loads them.
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder)));

    final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
/**
 * Performs storage maintenance for containers managed by node-admin: writes config files,
 * measures disk usage, removes old logs, reports/archives coredumps and archives container
 * data via jobs sent to the node-admin maintenance JVM.
 */
class StorageMaintainer {
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    private static final ObjectMapper objectMapper = new ObjectMapper();

    private final GaugeWrapper numberOfCoredumpsOnHost;
    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    // Per-container throttling state; concurrent map since node agents run on separate threads.
    private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
                             MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
        numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps");
    }

    /** Adds the standard tag set (flavor, zone, owner, membership, version) to a secret-agent check. */
    private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) {
        check.withTag("namespace", "Vespa")
                .withTag("role", "tenants")
                .withTag("flavor", node.getFlavor())
                .withTag("canonicalFlavor", node.getCanonicalFlavor())
                .withTag("state", node.getState().toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());
        node.getOwner().ifPresent(owner -> check
                .withTag("tenantName", owner.getTenant())
                .withTag("app", owner.getApplication() + "." + owner.getInstance())
                .withTag("applicationName", owner.getApplication())
                .withTag("instanceName", owner.getInstance())
                .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance()));
        node.getMembership().ifPresent(membership -> check
                .withTag("clustertype", membership.getClusterType())
                .withTag("clusterid", membership.getClusterId()));
        node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version));
        return check;
    }

    /** Generates and writes the filebeat config for the node; best-effort, failures are logged only. */
    public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(node);
            if (!config.isPresent()) {
                logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString());
                return;
            }
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(
                    containerName, Paths.get("/etc/filebeat/filebeat.yml"));
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } catch (Throwable t) {
            // Config writing must never take down the agent loop.
            logger.error("Failed writing filebeat config; " + node, t);
        }
    }

    /** @return disk usage in bytes of the container's /home, or empty if the calculation failed */
    public Optional<Long> getDiskUsageFor(ContainerName containerName) {
        Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/"));
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
            logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Runs {@code du -xsk} on the given path and converts the result to bytes.
     *
     * @return 0 if the path does not exist, otherwise the used bytes
     * @throws RuntimeException if du times out (60 s) or produces unexpected output
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        if (!Files.exists(path)) {
            return 0;
        }

        final String[] command = {"du", "-xsk", path.toString()};

        Process duCommand = new ProcessBuilder().command(command).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }
        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }

        // du -k reports kibibytes.
        long diskUsageKB = Long.valueOf(results[0]);
        return diskUsageKB * 1024;
    }

    /** Deletes old log files for vespa, nginx, logstash, etc. Throttled to at most once per hour. */
    public void removeOldFilesFromNode(ContainerName containerName) {
        if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
    }

    /** Queues delete-files jobs for the container's log and filedistribution directories. */
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        Path[] pathsToClean = {
                environment.pathInNodeUnderVespaHome("logs/elasticsearch2"),
                environment.pathInNodeUnderVespaHome("logs/logstash2"),
                environment.pathInNodeUnderVespaHome("logs/daemontools_y"),
                environment.pathInNodeUnderVespaHome("logs/nginx"),
                environment.pathInNodeUnderVespaHome("logs/vespa")
        };

        for (Path pathToClean : pathsToClean) {
            Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
            if (Files.exists(path)) {
                maintainerExecutor.addJob("delete-files")
                        .withArgument("basePath", path)
                        .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                        .withArgument("fileNameRegex", ".*\\.log.+")
                        .withArgument("recursive", false);
            }
        }

        Path qrsDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", qrsDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                .withArgument("recursive", false);

        Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", logArchiveDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
    }

    /**
     * Checks if container has any new coredumps, reports and archives them if so
     *
     * @param force Set to true to bypass throttling
     */
    public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) {
        // Best-effort sampling of the number of processed coredumps on the host.
        // NOTE(review): Files.list returns a Stream that should be closed (try-with-resources);
        // as written this leaks a directory handle on every call.
        try {
            numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count());
        } catch (IOException e) {
            // Deliberately ignored: metric sampling must not block coredump handling.
        }

        if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
    }

    /** Queues a handle-core-dumps job; no-op unless a coredump feed endpoint is configured. */
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) {
        if (!environment.getCoredumpFeedEndpoint().isPresent()) {
            // No endpoint configured — coredump reporting is disabled in this environment.
            return;
        }

        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", node.getHostname());
        attributes.put("parent_hostname", environment.getParentHostHostname());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", node.getFlavor());
        attributes.put("kernel_version", System.getProperty("os.version"));

        node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString()));
        node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version));
        node.getOwner().ifPresent(owner -> {
            attributes.put("tenant", owner.getTenant());
            attributes.put("application", owner.getApplication());
            attributes.put("instance", owner.getInstance());
        });

        maintainerExecutor.addJob("handle-core-dumps")
                .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
                .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var/crash")))
                .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get())
                .withArgument("attributes", attributes);
    }

    /**
     * Deletes old
     *  * archived app data
     *  * Vespa logs
     *  * Filedistribution files
     */
    public void cleanNodeAdmin() {
        if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        maintainerExecutor.addJob("delete-directories")
                .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
                .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
                .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));

        Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", nodeAdminJDiskLogsPath)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(
                NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
     * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty"
     */
    public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) {
        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        addArchiveNodeData(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        // Allow the next container's maintenance to run immediately.
        getMaintenanceThrottlerFor(containerName).reset();
    }

    /** Queues jobs that delete var/ and move the remaining container data to the cleanup area. */
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        maintainerExecutor.addJob("recursive-delete")
                .withArgument("path", environment.pathInNodeAdminFromPathInNode(
                        containerName, environment.pathInNodeUnderVespaHome("var")));

        maintainerExecutor.addJob("move-files")
                .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/")))
                .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    }

    /**
     * Runs node-maintainer's SpecVerifier and returns its output
     * @param node Node specification containing the excepted values we want to verify against
     * @return new combined hardware divergence
     * @throws RuntimeException if exit code != 0
     */
    public String getHardwareDivergence(NodeSpec node) {
        List<String> arguments = new ArrayList<>(Arrays.asList("specification",
                "--disk", Double.toString(node.getMinDiskAvailableGb()),
                "--memory", Double.toString(node.getMinMainMemoryAvailableGb()),
                "--cpu_cores", Double.toString(node.getMinCpuCores()),
                "--is_ssd", Boolean.toString(node.isFastDisk()),
                "--ips", String.join(",", node.getIpAddresses())));

        node.getHardwareDivergence().ifPresent(hardwareDivergence -> {
            arguments.add("--divergence");
            arguments.add(hardwareDivergence);
        });

        return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0]));
    }

    /**
     * Runs the node-admin maintenance.sh script under sudo with the given main class and arguments.
     *
     * @return trimmed stdout/stderr of the command
     * @throws RuntimeException if the command exits non-zero or cannot be executed
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo",
                        "VESPA_HOME=" + getDefaults().vespaHome(),
                        getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"),
                        mainClass),
                Stream.of(args))
                .toArray(String[]::new);

        try {
            Pair<Integer, String> result = processExecuter.exec(command);
            if (result.getFirst() != 0) {
                // Count the failure before throwing so the metric reflects every failed run.
                numberOfNodeAdminMaintenanceFails.add();
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }

    /**
     * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
     */
    private class MaintainerExecutor {
        private final List<MaintainerExecutorJob> jobs = new ArrayList<>();

        /** Queues a job of the given type; arguments are added via the returned job's withArgument(). */
        MaintainerExecutorJob addJob(String jobName) {
            MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
            jobs.add(job);
            return job;
        }

        /** Serializes all queued jobs to JSON and hands them to the maintenance JVM in one invocation. */
        void execute() {
            String args;
            try {
                args = objectMapper.writeValueAsString(jobs);
            } catch (JsonProcessingException e) {
                // NOTE(review): 'e' is dropped here — consider passing it as the cause for debuggability.
                throw new RuntimeException("Failed transform list of maintenance jobs to JSON");
            }

            executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
        }
    }

    /** A single maintenance job; serialized by Jackson as {"type": ..., "arguments": {...}}. */
    private class MaintainerExecutorJob {
        @JsonProperty(value="type")
        private final String type;

        @JsonProperty(value="arguments")
        private final Map<String, Object> arguments = new HashMap<>();

        MaintainerExecutorJob(String type) {
            this.type = type;
        }

        /** Adds a named argument; Path values are serialized as plain strings. */
        MaintainerExecutorJob withArgument(String argument, Object value) {
            arguments.put(argument, (value instanceof Path) ? value.toString() : value);
            return this;
        }
    }

    /** Returns (creating on first use) the throttler for the given container. */
    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
        maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler());
        return maintenanceThrottlerByContainerName.get(containerName);
    }

    /** Tracks the earliest time each kind of maintenance may run again for one container. */
    private class MaintenanceThrottler {
        private Instant nextRemoveOldFilesAt = Instant.EPOCH;
        private Instant nextHandleOldCoredumpsAt = Instant.EPOCH;

        // Old-file removal is throttled to once per hour.
        void updateNextRemoveOldFilesTime() {
            nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
        }

        boolean shouldRemoveOldFilesNow() {
            return !nextRemoveOldFilesAt.isAfter(clock.instant());
        }

        // Coredump handling is throttled to once per 5 minutes.
        void updateNextHandleCoredumpsTime() {
            nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
        }

        boolean shouldHandleCoredumpsNow() {
            return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
        }

        // Resets throttling (e.g. when the container is recycled) so maintenance may run immediately.
        void reset() {
            nextRemoveOldFilesAt = Instant.EPOCH;
            nextHandleOldCoredumpsAt = Instant.EPOCH;
        }
    }
}
class StorageMaintainer { private static final ContainerName NODE_ADMIN = new ContainerName("node-admin"); private static final ObjectMapper objectMapper = new ObjectMapper(); private final GaugeWrapper numberOfCoredumpsOnHost; private final CounterWrapper numberOfNodeAdminMaintenanceFails; private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final Clock clock; private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>(); public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.clock = clock; Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails"); numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps"); } private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", "tenants") .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." 
+ owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; } public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(node); if (!config.isPresent()) { logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString()); return; } Path filebeatPath = environment.pathInNodeAdminFromPathInNode( containerName, Paths.get("/etc/filebeat/filebeat.yml")); Files.write(filebeatPath, config.get().getBytes()); logger.info("Wrote filebeat config."); } catch (Throwable t) { logger.error("Failed writing filebeat config; " + node, t); } } public Optional<Long> getDiskUsageFor(ContainerName containerName) { Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/")); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) { return 0; } final String[] command = {"du", "-xsk", path.toString()}; Process duCommand = new ProcessBuilder().command(command).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new 
InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** * Deletes old log files for vespa, nginx, logstash, etc. */ public void removeOldFilesFromNode(ContainerName containerName) { if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime(); } private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) { Path[] pathsToClean = { environment.pathInNodeUnderVespaHome("logs/elasticsearch2"), environment.pathInNodeUnderVespaHome("logs/logstash2"), environment.pathInNodeUnderVespaHome("logs/daemontools_y"), environment.pathInNodeUnderVespaHome("logs/nginx"), environment.pathInNodeUnderVespaHome("logs/vespa") }; for (Path pathToClean : pathsToClean) { Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean); if (Files.exists(path)) { maintainerExecutor.addJob("delete-files") .withArgument("basePath", path) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*\\.log.+") .withArgument("recursive", false); } } Path qrsDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", qrsDir) .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds()) .withArgument("fileNameRegex", ".*QueryAccessLog.*") .withArgument("recursive", false); Path logArchiveDir = environment.pathInNodeAdminFromPathInNode( containerName, 
environment.pathInNodeUnderVespaHome("logs/vespa/logarchive")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", logArchiveDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); } /** * Checks if container has any new coredumps, reports and archives them if so * * @param force Set to true to bypass throttling */ public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) { try { numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count()); } catch (IOException e) { } if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime(); } private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) { if (!environment.getCoredumpFeedEndpoint().isPresent()) { return; } Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); 
node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); maintainerExecutor.addJob("handle-core-dumps") .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps()) .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var/crash"))) .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get()) .withArgument("attributes", attributes); } /** * Deletes old * * archived app data * * Vespa logs * * Filedistribution files */ public void cleanNodeAdmin() { if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return; MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); maintainerExecutor.addJob("delete-directories") .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin()) .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds()) .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX)); Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", nodeAdminJDiskLogsPath) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", false); Path fileDistrDir = environment.pathInNodeAdminFromPathInNode( NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")); maintainerExecutor.addJob("delete-files") .withArgument("basePath", fileDistrDir) .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds()) .withArgument("recursive", true); maintainerExecutor.execute(); getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime(); 
} /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) { MaintainerExecutor maintainerExecutor = new MaintainerExecutor(); addRemoveOldFilesCommand(maintainerExecutor, containerName); addHandleCoredumpsCommand(maintainerExecutor, containerName, node); addArchiveNodeData(maintainerExecutor, containerName); maintainerExecutor.execute(); getMaintenanceThrottlerFor(containerName).reset(); } private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) { maintainerExecutor.addJob("recursive-delete") .withArgument("path", environment.pathInNodeAdminFromPathInNode( containerName, environment.pathInNodeUnderVespaHome("var"))); maintainerExecutor.addJob("move-files") .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/"))) .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName)); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param node Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--ips", String.join(",", node.getIpAddresses()))); node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", 
arguments.toArray(new String[0])); } private String executeMaintainer(String mainClass, String... args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { numberOfNodeAdminMaintenanceFails.add(); throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } /** * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM */ private class MaintainerExecutor { private final List<MaintainerExecutorJob> jobs = new ArrayList<>(); MaintainerExecutorJob addJob(String jobName) { MaintainerExecutorJob job = new MaintainerExecutorJob(jobName); jobs.add(job); return job; } void execute() { String args; try { args = objectMapper.writeValueAsString(jobs); } catch (JsonProcessingException e) { throw new RuntimeException("Failed transform list of maintenance jobs to JSON"); } executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args); } } private class MaintainerExecutorJob { @JsonProperty(value="type") private final String type; @JsonProperty(value="arguments") private final Map<String, Object> arguments = new HashMap<>(); MaintainerExecutorJob(String type) { this.type = type; } MaintainerExecutorJob withArgument(String argument, Object value) { arguments.put(argument, (value instanceof Path) ? 
value.toString() : value); return this; } } private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) { maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler()); return maintenanceThrottlerByContainerName.get(containerName); } private class MaintenanceThrottler { private Instant nextRemoveOldFilesAt = Instant.EPOCH; private Instant nextHandleOldCoredumpsAt = Instant.EPOCH; void updateNextRemoveOldFilesTime() { nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1)); } boolean shouldRemoveOldFilesNow() { return !nextRemoveOldFilesAt.isAfter(clock.instant()); } void updateNextHandleCoredumpsTime() { nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5)); } boolean shouldHandleCoredumpsNow() { return !nextHandleOldCoredumpsAt.isAfter(clock.instant()); } void reset() { nextRemoveOldFilesAt = Instant.EPOCH; nextHandleOldCoredumpsAt = Instant.EPOCH; } } }
why is this loop different from the same loop in the other direction?
public int write(ByteBuffer src) throws IOException { if (flush() == FlushResult.NEED_WRITE) return 0; int totalBytesWrapped = 0; while (src.hasRemaining()) { int bytesWrapped = applicationDataWrap(src); if (bytesWrapped == 0) break; totalBytesWrapped += bytesWrapped; } return totalBytesWrapped; }
while (src.hasRemaining()) {
public int write(ByteBuffer src) throws IOException { if (flush() == FlushResult.NEED_WRITE) return 0; int totalBytesWrapped = 0; int bytesWrapped; do { bytesWrapped = applicationDataWrap(src); totalBytesWrapped += bytesWrapped; } while (bytesWrapped > 0 && wrapBuffer.bytes() < sessionPacketBufferSize); return totalBytesWrapped; }
class TlsCryptoSocket implements CryptoSocket { private static final ByteBuffer NULL_BUFFER = ByteBuffer.allocate(0); private static final Logger log = Logger.getLogger(TlsCryptoSocket.class.getName()); private enum HandshakeState { NOT_STARTED, NEED_READ, NEED_WRITE, COMPLETED } private final SocketChannel channel; private final SSLEngine sslEngine; private final Buffer wrapBuffer; private final Buffer unwrapBuffer; private ByteBuffer handshakeDummyBuffer; private HandshakeState handshakeState; public TlsCryptoSocket(SocketChannel channel, SSLEngine sslEngine) { this.channel = channel; this.sslEngine = sslEngine; SSLSession nullSession = sslEngine.getSession(); this.wrapBuffer = new Buffer(nullSession.getPacketBufferSize() * 2); this.unwrapBuffer = new Buffer(nullSession.getPacketBufferSize() * 2); this.handshakeDummyBuffer = ByteBuffer.allocate(nullSession.getApplicationBufferSize()); this.handshakeState = HandshakeState.NOT_STARTED; } @Override public SocketChannel channel() { return channel; } @Override public HandshakeResult handshake() throws IOException { HandshakeState newHandshakeState = processHandshakeState(this.handshakeState); log.fine(() -> String.format("Handshake state '%s -> %s'", this.handshakeState, newHandshakeState)); this.handshakeState = newHandshakeState; return toHandshakeResult(newHandshakeState); } private HandshakeState processHandshakeState(HandshakeState state) throws IOException { switch (state) { case NOT_STARTED: sslEngine.beginHandshake(); break; case NEED_WRITE: channelWrite(); break; case NEED_READ: channelRead(); break; case COMPLETED: return HandshakeState.COMPLETED; default: throw unhandledStateException(state); } while (true) { switch (sslEngine.getHandshakeStatus()) { case NOT_HANDSHAKING: if (hasWrapBufferMoreData()) return HandshakeState.NEED_WRITE; sslEngine.setEnableSessionCreation(false); handshakeDummyBuffer = null; return HandshakeState.COMPLETED; case NEED_TASK: sslEngine.getDelegatedTask().run(); break; case 
NEED_UNWRAP: if (hasWrapBufferMoreData()) return HandshakeState.NEED_WRITE; if (!handshakeUnwrap()) return HandshakeState.NEED_READ; break; case NEED_WRAP: if (!handshakeWrap()) return HandshakeState.NEED_WRITE; break; default: throw new IllegalStateException("Unexpected handshake status: " + sslEngine.getHandshakeStatus()); } } } private static HandshakeResult toHandshakeResult(HandshakeState state) { switch (state) { case NEED_READ: return HandshakeResult.NEED_READ; case NEED_WRITE: return HandshakeResult.NEED_WRITE; case COMPLETED: return HandshakeResult.DONE; default: throw unhandledStateException(state); } } @Override public int getMinimumReadBufferSize() { return sslEngine.getSession().getApplicationBufferSize(); } @Override public int read(ByteBuffer dst) throws IOException { verifyHandshakeCompleted(); int bytesUnwrapped = applicationDataUnwrap(dst); if (bytesUnwrapped > 0) return bytesUnwrapped; int bytesRead = channelRead(); if (bytesRead == 0) return 0; return drain(dst); } @Override public int drain(ByteBuffer dst) throws IOException { verifyHandshakeCompleted(); int totalBytesUnwrapped = 0; int bytesUnwrapped; do { bytesUnwrapped = applicationDataUnwrap(dst); totalBytesUnwrapped += bytesUnwrapped; } while (bytesUnwrapped > 0); return totalBytesUnwrapped; } @Override @Override public FlushResult flush() throws IOException { channelWrite(); return hasWrapBufferMoreData() ? 
FlushResult.NEED_WRITE : FlushResult.DONE; } private boolean handshakeWrap() throws IOException { SSLEngineResult result = sslEngineWrap(NULL_BUFFER); switch (result.getStatus()) { case OK: return true; case BUFFER_OVERFLOW: return false; default: throw unexpectedStatusException(result.getStatus()); } } private int applicationDataWrap(ByteBuffer src) throws IOException { SSLEngineResult result = sslEngineWrap(src); switch (result.getStatus()) { case OK: int bytesConsumed = result.bytesConsumed(); if (bytesConsumed == 0) throw new SSLException("Got handshake data in application data wrap"); return bytesConsumed; case BUFFER_OVERFLOW: return 0; default: throw unexpectedStatusException(result.getStatus()); } } private SSLEngineResult sslEngineWrap(ByteBuffer src) throws IOException { SSLEngineResult result = sslEngine.wrap(src, wrapBuffer.getWritable(0)); if (result.getStatus() == Status.CLOSED) throw new ClosedChannelException(); return result; } private boolean handshakeUnwrap() throws IOException { SSLEngineResult result = sslEngineUnwrap(handshakeDummyBuffer); switch (result.getStatus()) { case OK: if (result.bytesProduced() > 0) throw new SSLException("Got application data in handshake unwrap"); return true; case BUFFER_UNDERFLOW: return false; default: throw unexpectedStatusException(result.getStatus()); } } private int applicationDataUnwrap(ByteBuffer dst) throws IOException { SSLEngineResult result = sslEngineUnwrap(dst); switch (result.getStatus()) { case OK: int bytesProduced = result.bytesProduced(); if (bytesProduced == 0) throw new SSLException("Got handshake data in application data unwrap"); return bytesProduced; case BUFFER_OVERFLOW: case BUFFER_UNDERFLOW: return 0; default: throw unexpectedStatusException(result.getStatus()); } } private SSLEngineResult sslEngineUnwrap(ByteBuffer dst) throws IOException { SSLEngineResult result = sslEngine.unwrap(unwrapBuffer.getReadable(), dst); if (result.getStatus() == Status.CLOSED) throw new 
ClosedChannelException(); return result; } private int channelRead() throws IOException { int read = channel.read(unwrapBuffer.getWritable(0)); if (read == -1) throw new ClosedChannelException(); return read; } private int channelWrite() throws IOException { return channel.write(wrapBuffer.getReadable()); } private static IllegalStateException unhandledStateException(HandshakeState state) { return new IllegalStateException("Unhandled state: " + state); } private static IllegalStateException unexpectedStatusException(Status status) { return new IllegalStateException("Unexpected status: " + status); } private void verifyHandshakeCompleted() throws SSLException { if (handshakeState != HandshakeState.COMPLETED) throw new SSLException("Handshake not completed: handshakeState=" + handshakeState); } private boolean hasWrapBufferMoreData() { return wrapBuffer.bytes() > 0; } }
class TlsCryptoSocket implements CryptoSocket { private static final ByteBuffer NULL_BUFFER = ByteBuffer.allocate(0); private static final Logger log = Logger.getLogger(TlsCryptoSocket.class.getName()); private enum HandshakeState { NOT_STARTED, NEED_READ, NEED_WRITE, COMPLETED } private final SocketChannel channel; private final SSLEngine sslEngine; private final Buffer wrapBuffer; private final Buffer unwrapBuffer; private int sessionPacketBufferSize; private int sessionApplicationBufferSize; private ByteBuffer handshakeDummyBuffer; private HandshakeState handshakeState; public TlsCryptoSocket(SocketChannel channel, SSLEngine sslEngine) { this.channel = channel; this.sslEngine = sslEngine; SSLSession nullSession = sslEngine.getSession(); this.wrapBuffer = new Buffer(nullSession.getPacketBufferSize() * 2); this.unwrapBuffer = new Buffer(nullSession.getPacketBufferSize() * 2); this.handshakeDummyBuffer = ByteBuffer.allocate(nullSession.getApplicationBufferSize()); this.handshakeState = HandshakeState.NOT_STARTED; } @Override public SocketChannel channel() { return channel; } @Override public HandshakeResult handshake() throws IOException { HandshakeState newHandshakeState = processHandshakeState(this.handshakeState); log.fine(() -> String.format("Handshake state '%s -> %s'", this.handshakeState, newHandshakeState)); this.handshakeState = newHandshakeState; return toHandshakeResult(newHandshakeState); } private HandshakeState processHandshakeState(HandshakeState state) throws IOException { switch (state) { case NOT_STARTED: sslEngine.beginHandshake(); break; case NEED_WRITE: channelWrite(); break; case NEED_READ: channelRead(); break; case COMPLETED: return HandshakeState.COMPLETED; default: throw unhandledStateException(state); } while (true) { switch (sslEngine.getHandshakeStatus()) { case NOT_HANDSHAKING: if (wrapBuffer.bytes() > 0) return HandshakeState.NEED_WRITE; sslEngine.setEnableSessionCreation(false); handshakeDummyBuffer = null; SSLSession session = 
sslEngine.getSession(); sessionApplicationBufferSize = session.getApplicationBufferSize(); sessionPacketBufferSize = session.getPacketBufferSize(); return HandshakeState.COMPLETED; case NEED_TASK: sslEngine.getDelegatedTask().run(); break; case NEED_UNWRAP: if (wrapBuffer.bytes() > 0) return HandshakeState.NEED_WRITE; if (!handshakeUnwrap()) return HandshakeState.NEED_READ; break; case NEED_WRAP: if (!handshakeWrap()) return HandshakeState.NEED_WRITE; break; default: throw new IllegalStateException("Unexpected handshake status: " + sslEngine.getHandshakeStatus()); } } } private static HandshakeResult toHandshakeResult(HandshakeState state) { switch (state) { case NEED_READ: return HandshakeResult.NEED_READ; case NEED_WRITE: return HandshakeResult.NEED_WRITE; case COMPLETED: return HandshakeResult.DONE; default: throw unhandledStateException(state); } } @Override public int getMinimumReadBufferSize() { return sessionApplicationBufferSize; } @Override public int read(ByteBuffer dst) throws IOException { verifyHandshakeCompleted(); int bytesUnwrapped = drain(dst); if (bytesUnwrapped > 0) return bytesUnwrapped; int bytesRead = channelRead(); if (bytesRead == 0) return 0; return drain(dst); } @Override public int drain(ByteBuffer dst) throws IOException { verifyHandshakeCompleted(); int totalBytesUnwrapped = 0; int bytesUnwrapped; do { bytesUnwrapped = applicationDataUnwrap(dst); totalBytesUnwrapped += bytesUnwrapped; } while (bytesUnwrapped > 0); return totalBytesUnwrapped; } @Override @Override public FlushResult flush() throws IOException { channelWrite(); return wrapBuffer.bytes() > 0 ? 
FlushResult.NEED_WRITE : FlushResult.DONE; } private boolean handshakeWrap() throws IOException { SSLEngineResult result = sslEngineWrap(NULL_BUFFER); switch (result.getStatus()) { case OK: return true; case BUFFER_OVERFLOW: return false; default: throw unexpectedStatusException(result.getStatus()); } } private int applicationDataWrap(ByteBuffer src) throws IOException { SSLEngineResult result = sslEngineWrap(src); if (result.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) throw new SSLException("Renegotiation detected"); switch (result.getStatus()) { case OK: return result.bytesConsumed(); case BUFFER_OVERFLOW: return 0; default: throw unexpectedStatusException(result.getStatus()); } } private SSLEngineResult sslEngineWrap(ByteBuffer src) throws IOException { SSLEngineResult result = sslEngine.wrap(src, wrapBuffer.getWritable(sessionPacketBufferSize)); if (result.getStatus() == Status.CLOSED) throw new ClosedChannelException(); return result; } private boolean handshakeUnwrap() throws IOException { SSLEngineResult result = sslEngineUnwrap(handshakeDummyBuffer); switch (result.getStatus()) { case OK: if (result.bytesProduced() > 0) throw new SSLException("Got application data in handshake unwrap"); return true; case BUFFER_UNDERFLOW: return false; default: throw unexpectedStatusException(result.getStatus()); } } private int applicationDataUnwrap(ByteBuffer dst) throws IOException { SSLEngineResult result = sslEngineUnwrap(dst); if (result.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) throw new SSLException("Renegotiation detected"); switch (result.getStatus()) { case OK: return result.bytesProduced(); case BUFFER_OVERFLOW: case BUFFER_UNDERFLOW: return 0; default: throw unexpectedStatusException(result.getStatus()); } } private SSLEngineResult sslEngineUnwrap(ByteBuffer dst) throws IOException { SSLEngineResult result = sslEngine.unwrap(unwrapBuffer.getReadable(), dst); if (result.getStatus() == Status.CLOSED) throw new ClosedChannelException(); 
return result; } private int channelRead() throws IOException { int read = channel.read(unwrapBuffer.getWritable(sessionPacketBufferSize)); if (read == -1) throw new ClosedChannelException(); return read; } private int channelWrite() throws IOException { return channel.write(wrapBuffer.getReadable()); } private static IllegalStateException unhandledStateException(HandshakeState state) { return new IllegalStateException("Unhandled state: " + state); } private static IllegalStateException unexpectedStatusException(Status status) { return new IllegalStateException("Unexpected status: " + status); } private void verifyHandshakeCompleted() throws SSLException { if (handshakeState != HandshakeState.COMPLETED) throw new SSLException("Handshake not completed: handshakeState=" + handshakeState); } }
`ssl-status`
/**
 * Writes the secret-agent (yamas) metrics check configurations for the given container
 * and restarts the yamas agent inside it so the new checks take effect. The set of
 * checks written depends on the node type: all nodes get host-life, ntp and coredump
 * checks; non-config nodes get vespa checks; config servers and proxies get their own.
 *
 * @param containerName container to write the configs into
 * @param node          node spec used to annotate each check with tenant/app/cluster tags
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> configs = new ArrayList<>();

    Path hostLifeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life");
    SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath);
    configs.add(annotatedCheck(node, hostLifeSchedule));

    Path ntpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp");
    SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath);
    configs.add(annotatedCheck(node, ntpSchedule));

    Path coredumpCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps");
    SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath,
            "--application", "system-coredumps-processing",
            "--lastmin", "129600",
            "--crit", "1",
            "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString());
    configs.add(annotatedCheck(node, coredumpSchedule));

    if (node.getNodeType() != NodeType.config) {
        // BUGFIX: this check was registered as "vespa", colliding with the yms_check_vespa
        // check registered under the same name below; give the health check its own name.
        Path vespaHealthCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health");
        SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all");
        configs.add(annotatedCheck(node, vespaHealthSchedule));

        Path vespaCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa");
        SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all");
        configs.add(annotatedCheck(node, vespaSchedule));
    }

    if (node.getNodeType() == NodeType.config) {
        Path configServerCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2");
        SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath,
                "-zero", "configserver");
        configs.add(annotatedCheck(node, configServerSchedule));

        Path zkbackupCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath,
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(),
                "-m", "150",
                "-a", "config-zkbackupage");
        configs.add(annotatedCheck(node, zkbackupSchedule));
    }

    if (node.getNodeType() == NodeType.proxy) {
        Path routingAgeCheckPath = environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
        SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath,
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(),
                "-m", "90",
                "-a", "routing-configage");
        configs.add(annotatedCheck(node, routingAgeSchedule));

        // BUGFIX: this check was misnamed "ssh-status"; it runs yms_check_ssl_status
        // against port 4443, i.e. it monitors SSL, not SSH.
        Path sslCheckPath = environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status");
        SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300, sslCheckPath,
                "-e", "localhost",
                "-p", "4443",
                "-t", "30");
        configs.add(annotatedCheck(node, sslSchedule));
    }

    // Persist every check config under /etc/yamas-agent/ and restart the agent to load them.
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    configs.forEach(s -> IOExceptionUtil.uncheck(() -> s.writeTo(yamasAgentFolder)));

    final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssh-status", 300,
/**
 * Builds the secret-agent (yamas) metrics check configs appropriate for this node's
 * type, writes them into the container under /etc/yamas-agent/, and restarts the
 * yamas agent inside the container so the new configuration is picked up.
 *
 * @param containerName container whose agent configuration is written
 * @param node          node spec supplying the tags attached to every check
 */
public void writeMetricsConfig(ContainerName containerName, NodeSpec node) {
    List<SecretAgentCheckConfig> checks = new ArrayList<>();

    // Checks common to every node type.
    checks.add(annotatedCheck(node, new SecretAgentCheckConfig("host-life", 60,
            environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"))));
    checks.add(annotatedCheck(node, new SecretAgentCheckConfig("ntp", 60,
            environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"))));
    checks.add(annotatedCheck(node, new SecretAgentCheckConfig("system-coredumps-processing", 300,
            environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"),
            "--application", "system-coredumps-processing",
            "--lastmin", "129600",
            "--crit", "1",
            "--coredir", environment.pathInNodeUnderVespaHome("var/crash/processing").toString())));

    if (node.getNodeType() != NodeType.config) {
        // Vespa service checks for all non-config-server nodes.
        checks.add(annotatedCheck(node, new SecretAgentCheckConfig("vespa-health", 60,
                environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"), "all")));
        checks.add(annotatedCheck(node, new SecretAgentCheckConfig("vespa", 60,
                environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"), "all")));
    }

    if (node.getNodeType() == NodeType.config) {
        // Config servers: service check plus ZooKeeper backup freshness.
        checks.add(annotatedCheck(node, new SecretAgentCheckConfig("configserver", 60,
                environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"),
                "-zero", "configserver")));
        checks.add(annotatedCheck(node, new SecretAgentCheckConfig("zkbackupage", 300,
                environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"),
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(),
                "-m", "150",
                "-a", "config-zkbackupage")));
    }

    if (node.getNodeType() == NodeType.proxy) {
        // Proxy nodes: routing config freshness and SSL endpoint status.
        checks.add(annotatedCheck(node, new SecretAgentCheckConfig("routing-configage", 60,
                environment.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"),
                "-f", environment.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(),
                "-m", "90",
                "-a", "routing-configage")));
        checks.add(annotatedCheck(node, new SecretAgentCheckConfig("ssl-status", 300,
                environment.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"),
                "-e", "localhost",
                "-p", "4443",
                "-t", "30")));
    }

    // Write all configs, then restart the agent so they take effect.
    Path yamasAgentFolder = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/yamas-agent/"));
    checks.forEach(check -> IOExceptionUtil.uncheck(() -> check.writeTo(yamasAgentFolder)));

    final String[] restartYamasAgent = new String[]{"service", "yamas-agent", "restart"};
    dockerOperations.executeCommandInContainerAsRoot(containerName, restartYamasAgent);
}
/**
 * Maintains the storage of Docker containers on the host: writes config files into
 * containers, deletes/archives old files, reports coredumps and verifies hardware.
 * File-system work is delegated to a separate maintenance JVM started through
 * maintenance.sh (see {@link #executeMaintainer}); per-container throttlers limit
 * how often each job runs.
 */
class StorageMaintainer {
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    private static final ObjectMapper objectMapper = new ObjectMapper();

    private final GaugeWrapper numberOfCoredumpsOnHost;
    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    // Tracks, per container, when each maintenance job may next run. Final: the map
    // itself is never replaced, only populated.
    private final Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
                             MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
        numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps");
    }

    /** Adds the standard set of yamas tags (node, owner, membership, version) to the given check. */
    private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) {
        check.withTag("namespace", "Vespa")
                .withTag("role", "tenants")
                .withTag("flavor", node.getFlavor())
                .withTag("canonicalFlavor", node.getCanonicalFlavor())
                .withTag("state", node.getState().toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());
        node.getOwner().ifPresent(owner -> check
                .withTag("tenantName", owner.getTenant())
                .withTag("app", owner.getApplication() + "." + owner.getInstance())
                .withTag("applicationName", owner.getApplication())
                .withTag("instanceName", owner.getInstance())
                .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance()));
        node.getMembership().ifPresent(membership -> check
                .withTag("clustertype", membership.getClusterType())
                .withTag("clusterid", membership.getClusterId()));
        node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version));
        return check;
    }

    /** Writes the filebeat config for the node into the container; logs and continues on any failure. */
    public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(node);
            if (!config.isPresent()) {
                logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString());
                return;
            }
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/filebeat/filebeat.yml"));
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } catch (Throwable t) {
            logger.error("Failed writing filebeat config; " + node, t);
        }
    }

    /** Returns the disk usage (in bytes) of the container's /home, or empty if it could not be computed. */
    public Optional<Long> getDiskUsageFor(ContainerName containerName) {
        Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/"));
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
            logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Measures disk usage of a path via `du -xsk`, with a 60-second timeout.
     *
     * @return bytes used, or 0 if the path does not exist
     * @throws RuntimeException if du times out or its output is malformed
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        if (!Files.exists(path)) {
            return 0;
        }

        final String[] command = {"du", "-xsk", path.toString()};
        Process duCommand = new ProcessBuilder().command(command).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }
        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }

        // du -k reports in KiB; parseLong avoids the needless boxing of Long.valueOf.
        long diskUsageKB = Long.parseLong(results[0]);
        return diskUsageKB * 1024;
    }

    /** Deletes old log files for vespa, nginx, logstash, etc. Throttled to once per hour per container. */
    public void removeOldFilesFromNode(ContainerName containerName) {
        if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
    }

    /** Queues delete-files jobs for the container's rotating logs, query access logs, log archive and filedistribution. */
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        Path[] pathsToClean = {
                environment.pathInNodeUnderVespaHome("logs/elasticsearch2"),
                environment.pathInNodeUnderVespaHome("logs/logstash2"),
                environment.pathInNodeUnderVespaHome("logs/daemontools_y"),
                environment.pathInNodeUnderVespaHome("logs/nginx"),
                environment.pathInNodeUnderVespaHome("logs/vespa") };

        for (Path pathToClean : pathsToClean) {
            Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
            if (Files.exists(path)) {
                maintainerExecutor.addJob("delete-files")
                        .withArgument("basePath", path)
                        .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                        .withArgument("fileNameRegex", ".*\\.log.+")
                        .withArgument("recursive", false);
            }
        }

        Path qrsDir = environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", qrsDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                .withArgument("recursive", false);

        Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", logArchiveDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
    }

    /**
     * Checks if container has any new coredumps, reports and archives them if so
     *
     * @param force Set to true to bypass throttling
     */
    public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) {
        // Best-effort metric sampling of already-processed coredumps on the host.
        // BUGFIX: Files.list returns a Stream backed by an open directory handle that
        // must be closed; the original leaked it. try-with-resources closes it.
        try (Stream<Path> doneCoredumps = Files.list(environment.pathInNodeAdminToDoneCoredumps())) {
            numberOfCoredumpsOnHost.sample(doneCoredumps.count());
        } catch (IOException ignored) {
            // Intentionally swallowed: failing to sample the metric must not block coredump handling.
        }

        if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
    }

    /** Queues a handle-core-dumps job; no-op when no coredump feed endpoint is configured. */
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) {
        if (!environment.getCoredumpFeedEndpoint().isPresent()) {
            // No endpoint to feed the reports to — nothing to do.
            return;
        }

        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", node.getHostname());
        attributes.put("parent_hostname", environment.getParentHostHostname());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", node.getFlavor());
        attributes.put("kernel_version", System.getProperty("os.version"));

        node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString()));
        node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version));
        node.getOwner().ifPresent(owner -> {
            attributes.put("tenant", owner.getTenant());
            attributes.put("application", owner.getApplication());
            attributes.put("instance", owner.getInstance());
        });

        maintainerExecutor.addJob("handle-core-dumps")
                .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
                .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("var/crash")))
                .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get())
                .withArgument("attributes", attributes);
    }

    /**
     * Deletes old
     *  * archived app data
     *  * Vespa logs
     *  * Filedistribution files
     */
    public void cleanNodeAdmin() {
        if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        maintainerExecutor.addJob("delete-directories")
                .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
                .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
                .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));

        Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", nodeAdminJDiskLogsPath)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
     * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty"
     */
    public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) {
        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        addArchiveNodeData(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).reset();
    }

    /** Queues jobs that delete the container's var directory and move the rest to the cleanup area. */
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        maintainerExecutor.addJob("recursive-delete")
                .withArgument("path", environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("var")));

        maintainerExecutor.addJob("move-files")
                .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/")))
                .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    }

    /**
     * Runs node-maintainer's SpecVerifier and returns its output
     * @param node Node specification containing the excepted values we want to verify against
     * @return new combined hardware divergence
     * @throws RuntimeException if exit code != 0
     */
    public String getHardwareDivergence(NodeSpec node) {
        List<String> arguments = new ArrayList<>(Arrays.asList("specification",
                "--disk", Double.toString(node.getMinDiskAvailableGb()),
                "--memory", Double.toString(node.getMinMainMemoryAvailableGb()),
                "--cpu_cores", Double.toString(node.getMinCpuCores()),
                "--is_ssd", Boolean.toString(node.isFastDisk()),
                "--ips", String.join(",", node.getIpAddresses())));

        node.getHardwareDivergence().ifPresent(hardwareDivergence -> {
            arguments.add("--divergence");
            arguments.add(hardwareDivergence);
        });

        return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0]));
    }

    /**
     * Runs the given main class in the maintenance JVM (via sudo maintenance.sh) and
     * returns its trimmed stdout/stderr.
     *
     * @throws RuntimeException if the process exits non-zero or cannot be started
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo",
                        "VESPA_HOME=" + getDefaults().vespaHome(),
                        getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"),
                        mainClass),
                Stream.of(args))
                .toArray(String[]::new);
        try {
            Pair<Integer, String> result = processExecuter.exec(command);
            if (result.getFirst() != 0) {
                numberOfNodeAdminMaintenanceFails.add();
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }

    /**
     * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
     */
    private class MaintainerExecutor {
        private final List<MaintainerExecutorJob> jobs = new ArrayList<>();

        MaintainerExecutorJob addJob(String jobName) {
            MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
            jobs.add(job);
            return job;
        }

        void execute() {
            String args;
            try {
                args = objectMapper.writeValueAsString(jobs);
            } catch (JsonProcessingException e) {
                // BUGFIX: preserve the cause so the underlying serialization problem is visible.
                throw new RuntimeException("Failed transform list of maintenance jobs to JSON", e);
            }

            executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
        }
    }

    /** A single job (type + arguments) serialized to JSON for the maintenance JVM. */
    private class MaintainerExecutorJob {
        @JsonProperty(value="type")
        private final String type;

        @JsonProperty(value="arguments")
        private final Map<String, Object> arguments = new HashMap<>();

        MaintainerExecutorJob(String type) {
            this.type = type;
        }

        MaintainerExecutorJob withArgument(String argument, Object value) {
            // Paths are serialized as strings since Jackson can't serialize a Path directly.
            arguments.put(argument, (value instanceof Path) ? value.toString() : value);
            return this;
        }
    }

    /** Returns (creating on first use) the throttler for the given container. */
    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
        // computeIfAbsent avoids allocating a new throttler when one already exists
        // (the original putIfAbsent+get allocated unconditionally).
        return maintenanceThrottlerByContainerName.computeIfAbsent(containerName, name -> new MaintenanceThrottler());
    }

    /** Tracks the earliest next-run time for each throttled maintenance job. */
    private class MaintenanceThrottler {
        private Instant nextRemoveOldFilesAt = Instant.EPOCH;
        private Instant nextHandleOldCoredumpsAt = Instant.EPOCH;

        void updateNextRemoveOldFilesTime() {
            nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
        }

        boolean shouldRemoveOldFilesNow() {
            return !nextRemoveOldFilesAt.isAfter(clock.instant());
        }

        void updateNextHandleCoredumpsTime() {
            nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
        }

        boolean shouldHandleCoredumpsNow() {
            return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
        }

        /** Clears both deadlines so the next maintenance pass runs immediately. */
        void reset() {
            nextRemoveOldFilesAt = Instant.EPOCH;
            nextHandleOldCoredumpsAt = Instant.EPOCH;
        }
    }
}
/**
 * Maintains the storage of Docker containers on the host: writes config files into
 * containers, deletes/archives old files, reports coredumps and verifies hardware.
 * File-system work is delegated to a separate maintenance JVM started through
 * maintenance.sh (see executeMaintainer); per-container throttlers limit how often
 * each job runs.
 */
class StorageMaintainer {
    private static final ContainerName NODE_ADMIN = new ContainerName("node-admin");
    private static final ObjectMapper objectMapper = new ObjectMapper();

    private final GaugeWrapper numberOfCoredumpsOnHost;
    private final CounterWrapper numberOfNodeAdminMaintenanceFails;
    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final Clock clock;

    // Per-container throttling state. NOTE(review): field could be final — it is only
    // assigned here.
    private Map<ContainerName, MaintenanceThrottler> maintenanceThrottlerByContainerName = new ConcurrentHashMap<>();

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
                             MetricReceiverWrapper metricReceiver, Environment environment, Clock clock) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.clock = clock;

        Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
        numberOfNodeAdminMaintenanceFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.maintenance.fails");
        numberOfCoredumpsOnHost = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.coredumps");
    }

    /** Adds the standard set of yamas tags (node, owner, membership, version) to the given check. */
    private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) {
        check.withTag("namespace", "Vespa")
                .withTag("role", "tenants")
                .withTag("flavor", node.getFlavor())
                .withTag("canonicalFlavor", node.getCanonicalFlavor())
                .withTag("state", node.getState().toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());
        node.getOwner().ifPresent(owner -> check
                .withTag("tenantName", owner.getTenant())
                .withTag("app", owner.getApplication() + "." + owner.getInstance())
                .withTag("applicationName", owner.getApplication())
                .withTag("instanceName", owner.getInstance())
                .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance()));
        node.getMembership().ifPresent(membership -> check
                .withTag("clustertype", membership.getClusterType())
                .withTag("clusterid", membership.getClusterId()));
        node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version));
        return check;
    }

    /** Writes the filebeat config for the node into the container; logs and continues on any failure. */
    public void writeFilebeatConfig(ContainerName containerName, NodeSpec node) {
        PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(node);
            if (!config.isPresent()) {
                logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString());
                return;
            }
            Path filebeatPath = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/etc/filebeat/filebeat.yml"));
            Files.write(filebeatPath, config.get().getBytes());
            logger.info("Wrote filebeat config.");
        } catch (Throwable t) {
            logger.error("Failed writing filebeat config; " + node, t);
        }
    }

    /** Returns the disk usage (in bytes) of the container's /home, or empty if it could not be computed. */
    public Optional<Long> getDiskUsageFor(ContainerName containerName) {
        Path containerDir = environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/home/"));
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
            logger.error("Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Measures disk usage of a path via `du -xsk` (KiB, single filesystem) with a
     * 60-second timeout; returns bytes, or 0 if the path does not exist.
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        if (!Files.exists(path)) {
            return 0;
        }

        final String[] command = {"du", "-xsk", path.toString()};
        Process duCommand = new ProcessBuilder().command(command).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }
        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
        // du output is "<kilobytes>\t<path>".
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }
        long diskUsageKB = Long.valueOf(results[0]);

        return diskUsageKB * 1024;
    }

    /**
     * Deletes old log files for vespa, nginx, logstash, etc.
     * Throttled: runs at most once per interval per container (see MaintenanceThrottler).
     */
    public void removeOldFilesFromNode(ContainerName containerName) {
        if (! getMaintenanceThrottlerFor(containerName).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextRemoveOldFilesTime();
    }

    /** Queues delete-files jobs for rotating logs, query access logs, log archive and filedistribution. */
    private void addRemoveOldFilesCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        Path[] pathsToClean = {
                environment.pathInNodeUnderVespaHome("logs/elasticsearch2"),
                environment.pathInNodeUnderVespaHome("logs/logstash2"),
                environment.pathInNodeUnderVespaHome("logs/daemontools_y"),
                environment.pathInNodeUnderVespaHome("logs/nginx"),
                environment.pathInNodeUnderVespaHome("logs/vespa") };

        for (Path pathToClean : pathsToClean) {
            Path path = environment.pathInNodeAdminFromPathInNode(containerName, pathToClean);
            if (Files.exists(path)) {
                maintainerExecutor.addJob("delete-files")
                        .withArgument("basePath", path)
                        .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                        .withArgument("fileNameRegex", ".*\\.log.+")
                        .withArgument("recursive", false);
            }
        }

        Path qrsDir = environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("logs/vespa/qrs"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", qrsDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(3).getSeconds())
                .withArgument("fileNameRegex", ".*QueryAccessLog.*")
                .withArgument("recursive", false);

        Path logArchiveDir = environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", logArchiveDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);
    }

    /**
     * Checks if container has any new coredumps, reports and archives them if so
     *
     * @param force Set to true to bypass throttling
     */
    public void handleCoreDumpsForContainer(ContainerName containerName, NodeSpec node, boolean force) {
        // Best-effort metric: count already-processed coredumps on the host.
        // NOTE(review): the Stream returned by Files.list is never closed (directory
        // handle leak), and the IOException is silently swallowed — flag for cleanup.
        try {
            numberOfCoredumpsOnHost.sample(Files.list(environment.pathInNodeAdminToDoneCoredumps()).count());
        } catch (IOException e) {
            // Intentionally best-effort: sampling failure must not block coredump handling.
        }

        if (! getMaintenanceThrottlerFor(containerName).shouldHandleCoredumpsNow() && !force) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).updateNextHandleCoredumpsTime();
    }

    /** Queues a handle-core-dumps job; no-op when no coredump feed endpoint is configured. */
    private void addHandleCoredumpsCommand(MaintainerExecutor maintainerExecutor, ContainerName containerName, NodeSpec node) {
        if (!environment.getCoredumpFeedEndpoint().isPresent()) {
            // No endpoint to feed the reports to — nothing to do.
            return;
        }

        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", node.getHostname());
        attributes.put("parent_hostname", environment.getParentHostHostname());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", node.getFlavor());
        attributes.put("kernel_version", System.getProperty("os.version"));

        node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString()));
        node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version));
        node.getOwner().ifPresent(owner -> {
            attributes.put("tenant", owner.getTenant());
            attributes.put("application", owner.getApplication());
            attributes.put("instance", owner.getInstance());
        });

        maintainerExecutor.addJob("handle-core-dumps")
                .withArgument("doneCoredumpsPath", environment.pathInNodeAdminToDoneCoredumps())
                .withArgument("coredumpsPath", environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("var/crash")))
                .withArgument("feedEndpoint", environment.getCoredumpFeedEndpoint().get())
                .withArgument("attributes", attributes);
    }

    /**
     * Deletes old
     *  * archived app data
     *  * Vespa logs
     *  * Filedistribution files
     */
    public void cleanNodeAdmin() {
        if (! getMaintenanceThrottlerFor(NODE_ADMIN).shouldRemoveOldFilesNow()) return;

        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        maintainerExecutor.addJob("delete-directories")
                .withArgument("basePath", environment.getPathResolver().getApplicationStoragePathForNodeAdmin())
                .withArgument("maxAgeSeconds", Duration.ofDays(7).getSeconds())
                .withArgument("dirNameRegex", "^" + Pattern.quote(Environment.APPLICATION_STORAGE_CLEANUP_PATH_PREFIX));

        Path nodeAdminJDiskLogsPath = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, environment.pathInNodeUnderVespaHome("logs/vespa/"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", nodeAdminJDiskLogsPath)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", false);

        Path fileDistrDir = environment.pathInNodeAdminFromPathInNode(NODE_ADMIN, environment.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
        maintainerExecutor.addJob("delete-files")
                .withArgument("basePath", fileDistrDir)
                .withArgument("maxAgeSeconds", Duration.ofDays(31).getSeconds())
                .withArgument("recursive", true);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(NODE_ADMIN).updateNextRemoveOldFilesTime();
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of the current container.
     * Removes old files, reports coredumps and archives container data, runs when container enters state "dirty"
     */
    public void cleanupNodeStorage(ContainerName containerName, NodeSpec node) {
        MaintainerExecutor maintainerExecutor = new MaintainerExecutor();
        addRemoveOldFilesCommand(maintainerExecutor, containerName);
        addHandleCoredumpsCommand(maintainerExecutor, containerName, node);
        addArchiveNodeData(maintainerExecutor, containerName);

        maintainerExecutor.execute();
        getMaintenanceThrottlerFor(containerName).reset();
    }

    /** Queues jobs that delete the container's var directory and move everything else to the cleanup area. */
    private void addArchiveNodeData(MaintainerExecutor maintainerExecutor, ContainerName containerName) {
        maintainerExecutor.addJob("recursive-delete")
                .withArgument("path", environment.pathInNodeAdminFromPathInNode(containerName, environment.pathInNodeUnderVespaHome("var")));

        maintainerExecutor.addJob("move-files")
                .withArgument("from", environment.pathInNodeAdminFromPathInNode(containerName, Paths.get("/")))
                .withArgument("to", environment.pathInNodeAdminToNodeCleanup(containerName));
    }

    /**
     * Runs node-maintainer's SpecVerifier and returns its output
     * @param node Node specification containing the excepted values we want to verify against
     * @return new combined hardware divergence
     * @throws RuntimeException if exit code != 0
     */
    public String getHardwareDivergence(NodeSpec node) {
        List<String> arguments = new ArrayList<>(Arrays.asList("specification",
                "--disk", Double.toString(node.getMinDiskAvailableGb()),
                "--memory", Double.toString(node.getMinMainMemoryAvailableGb()),
                "--cpu_cores", Double.toString(node.getMinCpuCores()),
                "--is_ssd", Boolean.toString(node.isFastDisk()),
                "--ips", String.join(",", node.getIpAddresses())));

        node.getHardwareDivergence().ifPresent(hardwareDivergence -> {
            arguments.add("--divergence");
            arguments.add(hardwareDivergence);
        });

        return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0]));
    }

    /**
     * Runs the given main class in the maintenance JVM (via sudo maintenance.sh) and
     * returns its trimmed stdout/stderr; throws RuntimeException on non-zero exit.
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo",
                        "VESPA_HOME=" + getDefaults().vespaHome(),
                        getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"),
                        mainClass),
                Stream.of(args))
                .toArray(String[]::new);
        try {
            Pair<Integer, String> result = processExecuter.exec(command);
            if (result.getFirst() != 0) {
                numberOfNodeAdminMaintenanceFails.add();
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }

    /**
     * Wrapper for node-admin-maintenance, queues up maintenances jobs and sends a single request to maintenance JVM
     */
    private class MaintainerExecutor {
        private final List<MaintainerExecutorJob> jobs = new ArrayList<>();

        MaintainerExecutorJob addJob(String jobName) {
            MaintainerExecutorJob job = new MaintainerExecutorJob(jobName);
            jobs.add(job);
            return job;
        }

        void execute() {
            String args;
            try {
                args = objectMapper.writeValueAsString(jobs);
            } catch (JsonProcessingException e) {
                // NOTE(review): the cause `e` is dropped here — consider passing it to
                // the RuntimeException so the serialization failure is diagnosable.
                throw new RuntimeException("Failed transform list of maintenance jobs to JSON");
            }

            executeMaintainer("com.yahoo.vespa.hosted.node.maintainer.Maintainer", args);
        }
    }

    /** A single job (type + arguments) serialized to JSON for the maintenance JVM. */
    private class MaintainerExecutorJob {
        @JsonProperty(value="type")
        private final String type;

        @JsonProperty(value="arguments")
        private final Map<String, Object> arguments = new HashMap<>();

        MaintainerExecutorJob(String type) {
            this.type = type;
        }

        MaintainerExecutorJob withArgument(String argument, Object value) {
            // Paths are stored as strings for JSON serialization.
            arguments.put(argument, (value instanceof Path) ? value.toString() : value);
            return this;
        }
    }

    /** Returns (creating on first use) the throttler for the given container. */
    private MaintenanceThrottler getMaintenanceThrottlerFor(ContainerName containerName) {
        // NOTE(review): putIfAbsent+get allocates a throttler even when one exists;
        // computeIfAbsent would avoid that.
        maintenanceThrottlerByContainerName.putIfAbsent(containerName, new MaintenanceThrottler());
        return maintenanceThrottlerByContainerName.get(containerName);
    }

    /** Tracks the earliest next-run time for each throttled maintenance job. */
    private class MaintenanceThrottler {
        private Instant nextRemoveOldFilesAt = Instant.EPOCH;
        private Instant nextHandleOldCoredumpsAt = Instant.EPOCH;

        void updateNextRemoveOldFilesTime() {
            nextRemoveOldFilesAt = clock.instant().plus(Duration.ofHours(1));
        }

        boolean shouldRemoveOldFilesNow() {
            return !nextRemoveOldFilesAt.isAfter(clock.instant());
        }

        void updateNextHandleCoredumpsTime() {
            nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
        }

        boolean shouldHandleCoredumpsNow() {
            return !nextHandleOldCoredumpsAt.isAfter(clock.instant());
        }

        /** Clears both deadlines so the next maintenance pass runs immediately. */
        void reset() {
            nextRemoveOldFilesAt = Instant.EPOCH;
            nextHandleOldCoredumpsAt = Instant.EPOCH;
        }
    }
}
Exceptions are caught in the client, and result in `empty()`. This needs to change first, i.e., catch only when expected.
/**
 * Returns the lowest version currently running on the application's operational nodes in the
 * given zone, or {@code defaultVersion} if no node version could be determined.
 */
private Version currentVersion(ZoneId zone, ApplicationId application, Version defaultVersion) { return minVersion(zone, application, Node::currentVersion).orElse(defaultVersion); }
return minVersion(zone, application, Node::currentVersion).orElse(defaultVersion);
/**
 * Returns the lowest version currently running on the application's operational nodes in the
 * given zone, or {@code defaultVersion} if no node version could be determined.
 */
private Version currentVersion(ZoneId zone, ApplicationId application, Version defaultVersion) { return minVersion(zone, application, Node::currentVersion).orElse(defaultVersion); }
class SystemUpgrader extends Maintainer { private static final Logger log = Logger.getLogger(SystemUpgrader.class.getName()); public SystemUpgrader(Controller controller, Duration interval, JobControl jobControl) { super(controller, interval, jobControl); } @Override protected void maintain() { Optional<Version> target = targetVersion(); if (!target.isPresent()) { return; } try { deploy(SystemApplication.all(), target.get()); } catch (Exception e) { log.log(Level.WARNING, "Failed to upgrade system. Retrying in " + maintenanceInterval(), e); } } /** Deploy a list of system applications until they converge on the given version */ private void deploy(List<SystemApplication> applications, Version target) { for (List<ZoneId> zones : controller().zoneRegistry().upgradePolicy().asList()) { boolean converged = true; for (ZoneId zone : zones) { for (SystemApplication application : applications) { boolean dependenciesConverged = application.dependencies().stream() .allMatch(dependency -> convergedOn(zone, dependency, target)); if (dependenciesConverged) { deploy(target, application, zone); } converged &= convergedOn(zone, application, target); } } if (!converged) { break; } } } /** Deploy application on given version idempotently */ private void deploy(Version target, SystemApplication application, ZoneId zone) { if (!wantedVersion(zone, application.id(), target).equals(target)) { log.info(String.format("Deploying %s version %s in %s", application.id(), target, zone)); controller().applications().deploy(application, zone, target); } } private boolean convergedOn(ZoneId zone, SystemApplication application, Version target) { return currentVersion(zone, application.id(), target).equals(target); } private Version wantedVersion(ZoneId zone, ApplicationId application, Version defaultVersion) { return minVersion(zone, application, Node::wantedVersion).orElse(defaultVersion); } private Optional<Version> minVersion(ZoneId zone, ApplicationId application, Function<Node, Version> 
versionField) { try { return controller().configServer() .nodeRepository() .listOperational(zone, application) .stream() .map(versionField) .min(Comparator.naturalOrder()); } catch (Exception e) { log.log(Level.WARNING, String.format("Failed to get version for %s in %s: %s", application, zone, Exceptions.toMessageString(e))); return Optional.empty(); } } /** Returns target version for the system */ private Optional<Version> targetVersion() { return controller().versionStatus().controllerVersion() .filter(vespaVersion -> !vespaVersion.isSystemVersion()) .map(VespaVersion::versionNumber); } }
class SystemUpgrader extends Maintainer { private static final Logger log = Logger.getLogger(SystemUpgrader.class.getName()); public SystemUpgrader(Controller controller, Duration interval, JobControl jobControl) { super(controller, interval, jobControl); } @Override protected void maintain() { Optional<Version> target = targetVersion(); if (!target.isPresent()) { return; } deploy(SystemApplication.all(), target.get()); } /** Deploy a list of system applications until they converge on the given version */ private void deploy(List<SystemApplication> applications, Version target) { for (List<ZoneId> zones : controller().zoneRegistry().upgradePolicy().asList()) { boolean converged = true; for (ZoneId zone : zones) { try { converged &= deployInZone(zone, applications, target); } catch (UnreachableNodeRepositoryException e) { converged = false; log.log(Level.WARNING, e.getMessage() + ". Continuing to next parallel deployed zone"); } catch (Exception e) { converged = false; log.log(Level.WARNING, "Failed to upgrade " + zone + ". 
Continuing to next parallel deployed zone", e); } } if (!converged) { break; } } } /** @return true if all applications have converged to the target version in the zone */ private boolean deployInZone(ZoneId zone, List<SystemApplication> applications, Version target) { boolean converged = true; for (SystemApplication application : applications) { if (convergedOn(target, application.dependencies(), zone)) { deploy(target, application, zone); } converged &= convergedOn(target, application, zone); } return converged; } /** Deploy application on given version idempotently */ private void deploy(Version target, SystemApplication application, ZoneId zone) { if (!wantedVersion(zone, application.id(), target).equals(target)) { log.info(String.format("Deploying %s version %s in %s", application.id(), target, zone)); controller().applications().deploy(application, zone, target); } } private boolean convergedOn(Version target, List<SystemApplication> applications, ZoneId zone) { return applications.stream().allMatch(application -> convergedOn(target, application, zone)); } private boolean convergedOn(Version target, SystemApplication application, ZoneId zone) { return currentVersion(zone, application.id(), target).equals(target); } private Version wantedVersion(ZoneId zone, ApplicationId application, Version defaultVersion) { return minVersion(zone, application, Node::wantedVersion).orElse(defaultVersion); } private Optional<Version> minVersion(ZoneId zone, ApplicationId application, Function<Node, Version> versionField) { try { return controller().configServer() .nodeRepository() .listOperational(zone, application) .stream() .map(versionField) .min(Comparator.naturalOrder()); } catch (Exception e) { throw new UnreachableNodeRepositoryException(String.format("Failed to get version for %s in %s: %s", application, zone, Exceptions.toMessageString(e))); } } /** Returns target version for the system */ private Optional<Version> targetVersion() { return 
controller().versionStatus().controllerVersion() .filter(vespaVersion -> !vespaVersion.isSystemVersion()) .map(VespaVersion::versionNumber); } private class UnreachableNodeRepositoryException extends RuntimeException { private UnreachableNodeRepositoryException(String reason) { super(reason); } } }
Fixed, PTAL.
/**
 * Returns the lowest version currently running on the application's operational nodes in the
 * given zone, or {@code defaultVersion} if no node version could be determined.
 */
private Version currentVersion(ZoneId zone, ApplicationId application, Version defaultVersion) { return minVersion(zone, application, Node::currentVersion).orElse(defaultVersion); }
return minVersion(zone, application, Node::currentVersion).orElse(defaultVersion);
/**
 * Returns the lowest version currently running on the application's operational nodes in the
 * given zone, or {@code defaultVersion} if no node version could be determined.
 */
private Version currentVersion(ZoneId zone, ApplicationId application, Version defaultVersion) { return minVersion(zone, application, Node::currentVersion).orElse(defaultVersion); }
class SystemUpgrader extends Maintainer { private static final Logger log = Logger.getLogger(SystemUpgrader.class.getName()); public SystemUpgrader(Controller controller, Duration interval, JobControl jobControl) { super(controller, interval, jobControl); } @Override protected void maintain() { Optional<Version> target = targetVersion(); if (!target.isPresent()) { return; } try { deploy(SystemApplication.all(), target.get()); } catch (Exception e) { log.log(Level.WARNING, "Failed to upgrade system. Retrying in " + maintenanceInterval(), e); } } /** Deploy a list of system applications until they converge on the given version */ private void deploy(List<SystemApplication> applications, Version target) { for (List<ZoneId> zones : controller().zoneRegistry().upgradePolicy().asList()) { boolean converged = true; for (ZoneId zone : zones) { for (SystemApplication application : applications) { boolean dependenciesConverged = application.dependencies().stream() .allMatch(dependency -> convergedOn(zone, dependency, target)); if (dependenciesConverged) { deploy(target, application, zone); } converged &= convergedOn(zone, application, target); } } if (!converged) { break; } } } /** Deploy application on given version idempotently */ private void deploy(Version target, SystemApplication application, ZoneId zone) { if (!wantedVersion(zone, application.id(), target).equals(target)) { log.info(String.format("Deploying %s version %s in %s", application.id(), target, zone)); controller().applications().deploy(application, zone, target); } } private boolean convergedOn(ZoneId zone, SystemApplication application, Version target) { return currentVersion(zone, application.id(), target).equals(target); } private Version wantedVersion(ZoneId zone, ApplicationId application, Version defaultVersion) { return minVersion(zone, application, Node::wantedVersion).orElse(defaultVersion); } private Optional<Version> minVersion(ZoneId zone, ApplicationId application, Function<Node, Version> 
versionField) { try { return controller().configServer() .nodeRepository() .listOperational(zone, application) .stream() .map(versionField) .min(Comparator.naturalOrder()); } catch (Exception e) { log.log(Level.WARNING, String.format("Failed to get version for %s in %s: %s", application, zone, Exceptions.toMessageString(e))); return Optional.empty(); } } /** Returns target version for the system */ private Optional<Version> targetVersion() { return controller().versionStatus().controllerVersion() .filter(vespaVersion -> !vespaVersion.isSystemVersion()) .map(VespaVersion::versionNumber); } }
class SystemUpgrader extends Maintainer { private static final Logger log = Logger.getLogger(SystemUpgrader.class.getName()); public SystemUpgrader(Controller controller, Duration interval, JobControl jobControl) { super(controller, interval, jobControl); } @Override protected void maintain() { Optional<Version> target = targetVersion(); if (!target.isPresent()) { return; } deploy(SystemApplication.all(), target.get()); } /** Deploy a list of system applications until they converge on the given version */ private void deploy(List<SystemApplication> applications, Version target) { for (List<ZoneId> zones : controller().zoneRegistry().upgradePolicy().asList()) { boolean converged = true; for (ZoneId zone : zones) { try { converged &= deployInZone(zone, applications, target); } catch (UnreachableNodeRepositoryException e) { converged = false; log.log(Level.WARNING, e.getMessage() + ". Continuing to next parallel deployed zone"); } catch (Exception e) { converged = false; log.log(Level.WARNING, "Failed to upgrade " + zone + ". 
Continuing to next parallel deployed zone", e); } } if (!converged) { break; } } } /** @return true if all applications have converged to the target version in the zone */ private boolean deployInZone(ZoneId zone, List<SystemApplication> applications, Version target) { boolean converged = true; for (SystemApplication application : applications) { if (convergedOn(target, application.dependencies(), zone)) { deploy(target, application, zone); } converged &= convergedOn(target, application, zone); } return converged; } /** Deploy application on given version idempotently */ private void deploy(Version target, SystemApplication application, ZoneId zone) { if (!wantedVersion(zone, application.id(), target).equals(target)) { log.info(String.format("Deploying %s version %s in %s", application.id(), target, zone)); controller().applications().deploy(application, zone, target); } } private boolean convergedOn(Version target, List<SystemApplication> applications, ZoneId zone) { return applications.stream().allMatch(application -> convergedOn(target, application, zone)); } private boolean convergedOn(Version target, SystemApplication application, ZoneId zone) { return currentVersion(zone, application.id(), target).equals(target); } private Version wantedVersion(ZoneId zone, ApplicationId application, Version defaultVersion) { return minVersion(zone, application, Node::wantedVersion).orElse(defaultVersion); } private Optional<Version> minVersion(ZoneId zone, ApplicationId application, Function<Node, Version> versionField) { try { return controller().configServer() .nodeRepository() .listOperational(zone, application) .stream() .map(versionField) .min(Comparator.naturalOrder()); } catch (Exception e) { throw new UnreachableNodeRepositoryException(String.format("Failed to get version for %s in %s: %s", application, zone, Exceptions.toMessageString(e))); } } /** Returns target version for the system */ private Optional<Version> targetVersion() { return 
controller().versionStatus().controllerVersion() .filter(vespaVersion -> !vespaVersion.isSystemVersion()) .map(VespaVersion::versionNumber); } private class UnreachableNodeRepositoryException extends RuntimeException { private UnreachableNodeRepositoryException(String reason) { super(reason); } } }
I suggest extracting a method for this. You can overload `convergedOn(Version, List<SystemApplication>, Zone)` so the next block becomes: `if (convergedOn(version, application.dependencies(), zone))`
/**
 * Deploys the given system applications in the zone, an application only being deployed once
 * every one of its dependencies already runs the target version.
 *
 * @return true if all applications have converged on the target version in this zone
 */
private boolean deployInZone(ZoneId zone, List<SystemApplication> applications, Version target) {
    boolean allOnTarget = true;
    for (SystemApplication application : applications) {
        boolean dependenciesReady = application.dependencies()
                                               .stream()
                                               .allMatch(dependency -> convergedOn(zone, dependency, target));
        if (dependenciesReady) {
            deploy(target, application, zone);
        }
        allOnTarget &= convergedOn(zone, application, target);
    }
    return allOnTarget;
}
boolean dependenciesConverged = application.dependencies().stream()
/**
 * Deploys the given system applications in the zone, an application only being deployed once
 * every one of its dependencies has converged on the target version.
 *
 * @return true if all applications have converged on the target version in this zone
 */
private boolean deployInZone(ZoneId zone, List<SystemApplication> applications, Version target) { boolean converged = true; for (SystemApplication application : applications) { if (convergedOn(target, application.dependencies(), zone)) { deploy(target, application, zone); } converged &= convergedOn(target, application, zone); } return converged; }
class SystemUpgrader extends Maintainer { private static final Logger log = Logger.getLogger(SystemUpgrader.class.getName()); public SystemUpgrader(Controller controller, Duration interval, JobControl jobControl) { super(controller, interval, jobControl); } @Override protected void maintain() { Optional<Version> target = targetVersion(); if (!target.isPresent()) { return; } deploy(SystemApplication.all(), target.get()); } /** Deploy a list of system applications until they converge on the given version */ private void deploy(List<SystemApplication> applications, Version target) { for (List<ZoneId> zones : controller().zoneRegistry().upgradePolicy().asList()) { boolean converged = true; for (ZoneId zone : zones) { try { converged &= deployInZone(zone, applications, target); } catch (UnreachableNodeRepositoryException e) { converged = false; log.log(Level.WARNING, e.getMessage() + ". Continuing to next parallel deployed zone"); } catch (Exception e) { converged = false; log.log(Level.WARNING, "Failed to upgrade " + zone + ". 
Continuing to next parallel deployed zone", e); } } if (!converged) { break; } } } /** @return true if all applications have converged to the target version in the zone */ /** Deploy application on given version idempotently */ private void deploy(Version target, SystemApplication application, ZoneId zone) { if (!wantedVersion(zone, application.id(), target).equals(target)) { log.info(String.format("Deploying %s version %s in %s", application.id(), target, zone)); controller().applications().deploy(application, zone, target); } } private boolean convergedOn(ZoneId zone, SystemApplication application, Version target) { return currentVersion(zone, application.id(), target).equals(target); } private Version wantedVersion(ZoneId zone, ApplicationId application, Version defaultVersion) { return minVersion(zone, application, Node::wantedVersion).orElse(defaultVersion); } private Version currentVersion(ZoneId zone, ApplicationId application, Version defaultVersion) { return minVersion(zone, application, Node::currentVersion).orElse(defaultVersion); } private Optional<Version> minVersion(ZoneId zone, ApplicationId application, Function<Node, Version> versionField) { try { return controller().configServer() .nodeRepository() .listOperational(zone, application) .stream() .map(versionField) .min(Comparator.naturalOrder()); } catch (Exception e) { throw new UnreachableNodeRepositoryException(String.format("Failed to get version for %s in %s: %s", application, zone, Exceptions.toMessageString(e))); } } /** Returns target version for the system */ private Optional<Version> targetVersion() { return controller().versionStatus().controllerVersion() .filter(vespaVersion -> !vespaVersion.isSystemVersion()) .map(VespaVersion::versionNumber); } private class UnreachableNodeRepositoryException extends RuntimeException { private UnreachableNodeRepositoryException(String reason) { super(reason); } } }
class SystemUpgrader extends Maintainer { private static final Logger log = Logger.getLogger(SystemUpgrader.class.getName()); public SystemUpgrader(Controller controller, Duration interval, JobControl jobControl) { super(controller, interval, jobControl); } @Override protected void maintain() { Optional<Version> target = targetVersion(); if (!target.isPresent()) { return; } deploy(SystemApplication.all(), target.get()); } /** Deploy a list of system applications until they converge on the given version */ private void deploy(List<SystemApplication> applications, Version target) { for (List<ZoneId> zones : controller().zoneRegistry().upgradePolicy().asList()) { boolean converged = true; for (ZoneId zone : zones) { try { converged &= deployInZone(zone, applications, target); } catch (UnreachableNodeRepositoryException e) { converged = false; log.log(Level.WARNING, e.getMessage() + ". Continuing to next parallel deployed zone"); } catch (Exception e) { converged = false; log.log(Level.WARNING, "Failed to upgrade " + zone + ". 
Continuing to next parallel deployed zone", e); } } if (!converged) { break; } } } /** @return true if all applications have converged to the target version in the zone */ /** Deploy application on given version idempotently */ private void deploy(Version target, SystemApplication application, ZoneId zone) { if (!wantedVersion(zone, application.id(), target).equals(target)) { log.info(String.format("Deploying %s version %s in %s", application.id(), target, zone)); controller().applications().deploy(application, zone, target); } } private boolean convergedOn(Version target, List<SystemApplication> applications, ZoneId zone) { return applications.stream().allMatch(application -> convergedOn(target, application, zone)); } private boolean convergedOn(Version target, SystemApplication application, ZoneId zone) { return currentVersion(zone, application.id(), target).equals(target); } private Version wantedVersion(ZoneId zone, ApplicationId application, Version defaultVersion) { return minVersion(zone, application, Node::wantedVersion).orElse(defaultVersion); } private Version currentVersion(ZoneId zone, ApplicationId application, Version defaultVersion) { return minVersion(zone, application, Node::currentVersion).orElse(defaultVersion); } private Optional<Version> minVersion(ZoneId zone, ApplicationId application, Function<Node, Version> versionField) { try { return controller().configServer() .nodeRepository() .listOperational(zone, application) .stream() .map(versionField) .min(Comparator.naturalOrder()); } catch (Exception e) { throw new UnreachableNodeRepositoryException(String.format("Failed to get version for %s in %s: %s", application, zone, Exceptions.toMessageString(e))); } } /** Returns target version for the system */ private Optional<Version> targetVersion() { return controller().versionStatus().controllerVersion() .filter(vespaVersion -> !vespaVersion.isSystemVersion()) .map(VespaVersion::versionNumber); } private class UnreachableNodeRepositoryException 
extends RuntimeException { private UnreachableNodeRepositoryException(String reason) { super(reason); } } }
Fixed.
/**
 * Deploys the given system applications in the zone, an application only being deployed once
 * every one of its dependencies already runs the target version.
 *
 * @return true if all applications have converged on the target version in this zone
 */
private boolean deployInZone(ZoneId zone, List<SystemApplication> applications, Version target) {
    boolean allOnTarget = true;
    for (SystemApplication application : applications) {
        boolean dependenciesReady = application.dependencies()
                                               .stream()
                                               .allMatch(dependency -> convergedOn(zone, dependency, target));
        if (dependenciesReady) {
            deploy(target, application, zone);
        }
        allOnTarget &= convergedOn(zone, application, target);
    }
    return allOnTarget;
}
boolean dependenciesConverged = application.dependencies().stream()
/**
 * Deploys the given system applications in the zone, an application only being deployed once
 * every one of its dependencies has converged on the target version.
 *
 * @return true if all applications have converged on the target version in this zone
 */
private boolean deployInZone(ZoneId zone, List<SystemApplication> applications, Version target) { boolean converged = true; for (SystemApplication application : applications) { if (convergedOn(target, application.dependencies(), zone)) { deploy(target, application, zone); } converged &= convergedOn(target, application, zone); } return converged; }
class SystemUpgrader extends Maintainer { private static final Logger log = Logger.getLogger(SystemUpgrader.class.getName()); public SystemUpgrader(Controller controller, Duration interval, JobControl jobControl) { super(controller, interval, jobControl); } @Override protected void maintain() { Optional<Version> target = targetVersion(); if (!target.isPresent()) { return; } deploy(SystemApplication.all(), target.get()); } /** Deploy a list of system applications until they converge on the given version */ private void deploy(List<SystemApplication> applications, Version target) { for (List<ZoneId> zones : controller().zoneRegistry().upgradePolicy().asList()) { boolean converged = true; for (ZoneId zone : zones) { try { converged &= deployInZone(zone, applications, target); } catch (UnreachableNodeRepositoryException e) { converged = false; log.log(Level.WARNING, e.getMessage() + ". Continuing to next parallel deployed zone"); } catch (Exception e) { converged = false; log.log(Level.WARNING, "Failed to upgrade " + zone + ". 
Continuing to next parallel deployed zone", e); } } if (!converged) { break; } } } /** @return true if all applications have converged to the target version in the zone */ /** Deploy application on given version idempotently */ private void deploy(Version target, SystemApplication application, ZoneId zone) { if (!wantedVersion(zone, application.id(), target).equals(target)) { log.info(String.format("Deploying %s version %s in %s", application.id(), target, zone)); controller().applications().deploy(application, zone, target); } } private boolean convergedOn(ZoneId zone, SystemApplication application, Version target) { return currentVersion(zone, application.id(), target).equals(target); } private Version wantedVersion(ZoneId zone, ApplicationId application, Version defaultVersion) { return minVersion(zone, application, Node::wantedVersion).orElse(defaultVersion); } private Version currentVersion(ZoneId zone, ApplicationId application, Version defaultVersion) { return minVersion(zone, application, Node::currentVersion).orElse(defaultVersion); } private Optional<Version> minVersion(ZoneId zone, ApplicationId application, Function<Node, Version> versionField) { try { return controller().configServer() .nodeRepository() .listOperational(zone, application) .stream() .map(versionField) .min(Comparator.naturalOrder()); } catch (Exception e) { throw new UnreachableNodeRepositoryException(String.format("Failed to get version for %s in %s: %s", application, zone, Exceptions.toMessageString(e))); } } /** Returns target version for the system */ private Optional<Version> targetVersion() { return controller().versionStatus().controllerVersion() .filter(vespaVersion -> !vespaVersion.isSystemVersion()) .map(VespaVersion::versionNumber); } private class UnreachableNodeRepositoryException extends RuntimeException { private UnreachableNodeRepositoryException(String reason) { super(reason); } } }
class SystemUpgrader extends Maintainer { private static final Logger log = Logger.getLogger(SystemUpgrader.class.getName()); public SystemUpgrader(Controller controller, Duration interval, JobControl jobControl) { super(controller, interval, jobControl); } @Override protected void maintain() { Optional<Version> target = targetVersion(); if (!target.isPresent()) { return; } deploy(SystemApplication.all(), target.get()); } /** Deploy a list of system applications until they converge on the given version */ private void deploy(List<SystemApplication> applications, Version target) { for (List<ZoneId> zones : controller().zoneRegistry().upgradePolicy().asList()) { boolean converged = true; for (ZoneId zone : zones) { try { converged &= deployInZone(zone, applications, target); } catch (UnreachableNodeRepositoryException e) { converged = false; log.log(Level.WARNING, e.getMessage() + ". Continuing to next parallel deployed zone"); } catch (Exception e) { converged = false; log.log(Level.WARNING, "Failed to upgrade " + zone + ". 
Continuing to next parallel deployed zone", e); } } if (!converged) { break; } } } /** @return true if all applications have converged to the target version in the zone */ /** Deploy application on given version idempotently */ private void deploy(Version target, SystemApplication application, ZoneId zone) { if (!wantedVersion(zone, application.id(), target).equals(target)) { log.info(String.format("Deploying %s version %s in %s", application.id(), target, zone)); controller().applications().deploy(application, zone, target); } } private boolean convergedOn(Version target, List<SystemApplication> applications, ZoneId zone) { return applications.stream().allMatch(application -> convergedOn(target, application, zone)); } private boolean convergedOn(Version target, SystemApplication application, ZoneId zone) { return currentVersion(zone, application.id(), target).equals(target); } private Version wantedVersion(ZoneId zone, ApplicationId application, Version defaultVersion) { return minVersion(zone, application, Node::wantedVersion).orElse(defaultVersion); } private Version currentVersion(ZoneId zone, ApplicationId application, Version defaultVersion) { return minVersion(zone, application, Node::currentVersion).orElse(defaultVersion); } private Optional<Version> minVersion(ZoneId zone, ApplicationId application, Function<Node, Version> versionField) { try { return controller().configServer() .nodeRepository() .listOperational(zone, application) .stream() .map(versionField) .min(Comparator.naturalOrder()); } catch (Exception e) { throw new UnreachableNodeRepositoryException(String.format("Failed to get version for %s in %s: %s", application, zone, Exceptions.toMessageString(e))); } } /** Returns target version for the system */ private Optional<Version> targetVersion() { return controller().versionStatus().controllerVersion() .filter(vespaVersion -> !vespaVersion.isSystemVersion()) .map(VespaVersion::versionNumber); } private class UnreachableNodeRepositoryException 
extends RuntimeException { private UnreachableNodeRepositoryException(String reason) { super(reason); } } }
Should this exception be ignored?
/**
 * Inspects the given class entry of a bundle and returns its class name if it is a JAX-RS
 * resource or provider, {@link Optional#empty()} otherwise.
 *
 * @throws RuntimeException if the class entry cannot be read — an unreadable entry is a
 *         deployment error, not an expected condition, so it is propagated instead of being
 *         silently swallowed (previously this returned {@code empty()}, hiding the failure)
 */
private static Optional<String> detectResourceOrProvider(ClassLoader bundleClassLoader, String classEntry) {
    try (InputStream inputStream = getResourceAsStream(bundleClassLoader, classEntry)) {
        ResourceOrProviderClassVisitor visitor = ResourceOrProviderClassVisitor.visit(new ClassReader(inputStream));
        return Optional.ofNullable(visitor.getClassName());
    } catch (IOException e) {
        throw new RuntimeException("Failed to read class entry " + classEntry, e);
    }
}
private static Optional<String> detectResourceOrProvider(ClassLoader bundleClassLoader, String classEntry) { try (InputStream inputStream = getResourceAsStream(bundleClassLoader, classEntry)) { ResourceOrProviderClassVisitor visitor = ResourceOrProviderClassVisitor.visit(new ClassReader(inputStream)); return Optional.ofNullable(visitor.getClassName()); } catch (IOException e) { throw new RuntimeException(e); } }
class JerseyServletProvider implements Provider<ServletHolder> { private final ServletHolder jerseyServletHolder; public JerseyServletProvider(RestApiContext restApiContext) { this.jerseyServletHolder = new ServletHolder(new ServletContainer(resourceConfig(restApiContext))); } private ResourceConfig resourceConfig(RestApiContext restApiContext) { final ResourceConfig resourceConfig = ResourceConfig .forApplication(new JerseyApplication(resourcesAndProviders(restApiContext.getBundles()))); registerComponent(resourceConfig, componentInjectorBinder(restApiContext)); registerComponent(resourceConfig, jacksonDatatypeJdk8Provider()); resourceConfig.register(MultiPartFeature.class); return resourceConfig; } private static Collection<Class<?>> resourcesAndProviders(Collection<BundleInfo> bundles) { final List<Class<?>> ret = new ArrayList<>(); for (BundleInfo bundle : bundles) { for (String classEntry : bundle.getClassEntries()) { Optional<String> className = detectResourceOrProvider(bundle.classLoader, classEntry); className.ifPresent(cname -> ret.add(loadClass(bundle.symbolicName, bundle.classLoader, cname))); } } return ret; } private static InputStream getResourceAsStream(ClassLoader bundleClassLoader, String classEntry) { InputStream is = bundleClassLoader.getResourceAsStream(classEntry); if (is == null) { throw new RuntimeException("No entry " + classEntry + " in bundle " + bundleClassLoader); } else { return is; } } private static Class<?> loadClass(String bundleSymbolicName, ClassLoader classLoader, String className) { try { return classLoader.loadClass(className); } catch (Exception e) { throw new RuntimeException("Failed loading class " + className + " from bundle " + bundleSymbolicName, e); } } private static Binder componentInjectorBinder(RestApiContext restApiContext) { final ComponentGraphProvider componentGraphProvider = new ComponentGraphProvider(restApiContext.getInjectableComponents()); final TypeLiteral<InjectionResolver<Component>> 
componentAnnotationType = new TypeLiteral<InjectionResolver<Component>>() { }; return new AbstractBinder() { @Override public void configure() { bind(componentGraphProvider).to(componentAnnotationType); } }; } private static JacksonJaxbJsonProvider jacksonDatatypeJdk8Provider() { JacksonJaxbJsonProvider provider = new JacksonJaxbJsonProvider(); provider.setMapper(new ObjectMapper().registerModule(new Jdk8Module()).registerModule(new JavaTimeModule())); return provider; } @Override public ServletHolder get() { return jerseyServletHolder; } @Override public void deconstruct() { } }
class JerseyServletProvider implements Provider<ServletHolder> { private final ServletHolder jerseyServletHolder; public JerseyServletProvider(RestApiContext restApiContext) { this.jerseyServletHolder = new ServletHolder(new ServletContainer(resourceConfig(restApiContext))); } private ResourceConfig resourceConfig(RestApiContext restApiContext) { final ResourceConfig resourceConfig = ResourceConfig .forApplication(new JerseyApplication(resourcesAndProviders(restApiContext.getBundles()))); registerComponent(resourceConfig, componentInjectorBinder(restApiContext)); registerComponent(resourceConfig, jacksonDatatypeJdk8Provider()); resourceConfig.register(MultiPartFeature.class); return resourceConfig; } private static Collection<Class<?>> resourcesAndProviders(Collection<BundleInfo> bundles) { final List<Class<?>> ret = new ArrayList<>(); for (BundleInfo bundle : bundles) { for (String classEntry : bundle.getClassEntries()) { Optional<String> className = detectResourceOrProvider(bundle.classLoader, classEntry); className.ifPresent(cname -> ret.add(loadClass(bundle.symbolicName, bundle.classLoader, cname))); } } return ret; } private static InputStream getResourceAsStream(ClassLoader bundleClassLoader, String classEntry) { InputStream is = bundleClassLoader.getResourceAsStream(classEntry); if (is == null) { throw new RuntimeException("No entry " + classEntry + " in bundle " + bundleClassLoader); } else { return is; } } private static Class<?> loadClass(String bundleSymbolicName, ClassLoader classLoader, String className) { try { return classLoader.loadClass(className); } catch (Exception e) { throw new RuntimeException("Failed loading class " + className + " from bundle " + bundleSymbolicName, e); } } private static Binder componentInjectorBinder(RestApiContext restApiContext) { final ComponentGraphProvider componentGraphProvider = new ComponentGraphProvider(restApiContext.getInjectableComponents()); final TypeLiteral<InjectionResolver<Component>> 
componentAnnotationType = new TypeLiteral<InjectionResolver<Component>>() { }; return new AbstractBinder() { @Override public void configure() { bind(componentGraphProvider).to(componentAnnotationType); } }; } private static JacksonJaxbJsonProvider jacksonDatatypeJdk8Provider() { JacksonJaxbJsonProvider provider = new JacksonJaxbJsonProvider(); provider.setMapper(new ObjectMapper().registerModule(new Jdk8Module()).registerModule(new JavaTimeModule())); return provider; } @Override public ServletHolder get() { return jerseyServletHolder; } @Override public void deconstruct() { } }
Use ASM6 as in @gjoranv's recent PR: https://github.com/vespa-engine/vespa/pull/5919/files#diff-8.
public ResourceOrProviderClassVisitor() { super(Opcodes.ASM5); }
super(Opcodes.ASM5);
public ResourceOrProviderClassVisitor() { super(Opcodes.ASM5); }
class ResourceOrProviderClassVisitor extends ClassVisitor { private String className = null; private boolean isPublic = false; private boolean isAbstract = false; private boolean isInnerClass = false; private boolean isStatic = false; private boolean isAnnotated = false; public Optional<String> getJerseyClassName() { if (isJerseyClass()) { return Optional.of(getClassName()); } else { return Optional.empty(); } } public boolean isJerseyClass() { return isAnnotated && isPublic && !isAbstract && (!isInnerClass || isStatic); } public String getClassName() { assert (className != null); return org.objectweb.asm.Type.getObjectType(className).getClassName(); } @Override public void visit(int version, int access, String name, String signature, String superName, String[] interfaces) { isPublic = isPublic(access); className = name; isAbstract = isAbstract(access); } @Override public void visitInnerClass(String name, String outerName, String innerName, int access) { assert (className != null); if (name.equals(className)) { isInnerClass = true; isStatic = isStatic(access); } } @Override public AnnotationVisitor visitAnnotation(String desc, boolean visible) { isAnnotated |= annotationClassDescriptors.contains(desc); return null; } private static Set<String> annotationClassDescriptors = new HashSet<>(); static { annotationClassDescriptors.add(Type.getDescriptor(Path.class)); annotationClassDescriptors.add(Type.getDescriptor(Provider.class)); } private static boolean isPublic(int access) { return isSet(Opcodes.ACC_PUBLIC, access); } private static boolean isStatic(int access) { return isSet(Opcodes.ACC_STATIC, access); } private static boolean isAbstract(int access) { return isSet(Opcodes.ACC_ABSTRACT, access); } private static boolean isSet(int bits, int access) { return (access & bits) == bits; } public static ResourceOrProviderClassVisitor visit(ClassReader classReader) { ResourceOrProviderClassVisitor visitor = new ResourceOrProviderClassVisitor(); classReader.accept(visitor, 
ClassReader.SKIP_DEBUG | ClassReader.SKIP_CODE | ClassReader.SKIP_FRAMES); return visitor; } }
class ResourceOrProviderClassVisitor extends ClassVisitor { private String className = null; private boolean isPublic = false; private boolean isAbstract = false; private boolean isInnerClass = false; private boolean isStatic = false; private boolean isAnnotated = false; public Optional<String> getJerseyClassName() { if (isJerseyClass()) { return Optional.of(getClassName()); } else { return Optional.empty(); } } public boolean isJerseyClass() { return isAnnotated && isPublic && !isAbstract && (!isInnerClass || isStatic); } public String getClassName() { assert (className != null); return org.objectweb.asm.Type.getObjectType(className).getClassName(); } @Override public void visit(int version, int access, String name, String signature, String superName, String[] interfaces) { isPublic = isPublic(access); className = name; isAbstract = isAbstract(access); } @Override public void visitInnerClass(String name, String outerName, String innerName, int access) { assert (className != null); if (name.equals(className)) { isInnerClass = true; isStatic = isStatic(access); } } @Override public AnnotationVisitor visitAnnotation(String desc, boolean visible) { isAnnotated |= annotationClassDescriptors.contains(desc); return null; } private static Set<String> annotationClassDescriptors = new HashSet<>(); static { annotationClassDescriptors.add(Type.getDescriptor(Path.class)); annotationClassDescriptors.add(Type.getDescriptor(Provider.class)); } private static boolean isPublic(int access) { return isSet(Opcodes.ACC_PUBLIC, access); } private static boolean isStatic(int access) { return isSet(Opcodes.ACC_STATIC, access); } private static boolean isAbstract(int access) { return isSet(Opcodes.ACC_ABSTRACT, access); } private static boolean isSet(int bits, int access) { return (access & bits) == bits; } public static ResourceOrProviderClassVisitor visit(ClassReader classReader) { ResourceOrProviderClassVisitor visitor = new ResourceOrProviderClassVisitor(); classReader.accept(visitor, 
ClassReader.SKIP_DEBUG | ClassReader.SKIP_CODE | ClassReader.SKIP_FRAMES); return visitor; } }
EDIT: Noticed that the PR was not merged. Ignore above comment.
public ResourceOrProviderClassVisitor() { super(Opcodes.ASM5); }
super(Opcodes.ASM5);
public ResourceOrProviderClassVisitor() { super(Opcodes.ASM5); }
class ResourceOrProviderClassVisitor extends ClassVisitor { private String className = null; private boolean isPublic = false; private boolean isAbstract = false; private boolean isInnerClass = false; private boolean isStatic = false; private boolean isAnnotated = false; public Optional<String> getJerseyClassName() { if (isJerseyClass()) { return Optional.of(getClassName()); } else { return Optional.empty(); } } public boolean isJerseyClass() { return isAnnotated && isPublic && !isAbstract && (!isInnerClass || isStatic); } public String getClassName() { assert (className != null); return org.objectweb.asm.Type.getObjectType(className).getClassName(); } @Override public void visit(int version, int access, String name, String signature, String superName, String[] interfaces) { isPublic = isPublic(access); className = name; isAbstract = isAbstract(access); } @Override public void visitInnerClass(String name, String outerName, String innerName, int access) { assert (className != null); if (name.equals(className)) { isInnerClass = true; isStatic = isStatic(access); } } @Override public AnnotationVisitor visitAnnotation(String desc, boolean visible) { isAnnotated |= annotationClassDescriptors.contains(desc); return null; } private static Set<String> annotationClassDescriptors = new HashSet<>(); static { annotationClassDescriptors.add(Type.getDescriptor(Path.class)); annotationClassDescriptors.add(Type.getDescriptor(Provider.class)); } private static boolean isPublic(int access) { return isSet(Opcodes.ACC_PUBLIC, access); } private static boolean isStatic(int access) { return isSet(Opcodes.ACC_STATIC, access); } private static boolean isAbstract(int access) { return isSet(Opcodes.ACC_ABSTRACT, access); } private static boolean isSet(int bits, int access) { return (access & bits) == bits; } public static ResourceOrProviderClassVisitor visit(ClassReader classReader) { ResourceOrProviderClassVisitor visitor = new ResourceOrProviderClassVisitor(); classReader.accept(visitor, 
ClassReader.SKIP_DEBUG | ClassReader.SKIP_CODE | ClassReader.SKIP_FRAMES); return visitor; } }
class ResourceOrProviderClassVisitor extends ClassVisitor { private String className = null; private boolean isPublic = false; private boolean isAbstract = false; private boolean isInnerClass = false; private boolean isStatic = false; private boolean isAnnotated = false; public Optional<String> getJerseyClassName() { if (isJerseyClass()) { return Optional.of(getClassName()); } else { return Optional.empty(); } } public boolean isJerseyClass() { return isAnnotated && isPublic && !isAbstract && (!isInnerClass || isStatic); } public String getClassName() { assert (className != null); return org.objectweb.asm.Type.getObjectType(className).getClassName(); } @Override public void visit(int version, int access, String name, String signature, String superName, String[] interfaces) { isPublic = isPublic(access); className = name; isAbstract = isAbstract(access); } @Override public void visitInnerClass(String name, String outerName, String innerName, int access) { assert (className != null); if (name.equals(className)) { isInnerClass = true; isStatic = isStatic(access); } } @Override public AnnotationVisitor visitAnnotation(String desc, boolean visible) { isAnnotated |= annotationClassDescriptors.contains(desc); return null; } private static Set<String> annotationClassDescriptors = new HashSet<>(); static { annotationClassDescriptors.add(Type.getDescriptor(Path.class)); annotationClassDescriptors.add(Type.getDescriptor(Provider.class)); } private static boolean isPublic(int access) { return isSet(Opcodes.ACC_PUBLIC, access); } private static boolean isStatic(int access) { return isSet(Opcodes.ACC_STATIC, access); } private static boolean isAbstract(int access) { return isSet(Opcodes.ACC_ABSTRACT, access); } private static boolean isSet(int bits, int access) { return (access & bits) == bits; } public static ResourceOrProviderClassVisitor visit(ClassReader classReader) { ResourceOrProviderClassVisitor visitor = new ResourceOrProviderClassVisitor(); classReader.accept(visitor, 
ClassReader.SKIP_DEBUG | ClassReader.SKIP_CODE | ClassReader.SKIP_FRAMES); return visitor; } }
Yes, we're not ready to use new ASM in container-jersey2 yet. First we need latest asm library all over, which requires latest maven-bundle-plugin which again requires maven 3.5.
public ResourceOrProviderClassVisitor() { super(Opcodes.ASM5); }
super(Opcodes.ASM5);
public ResourceOrProviderClassVisitor() { super(Opcodes.ASM5); }
class ResourceOrProviderClassVisitor extends ClassVisitor { private String className = null; private boolean isPublic = false; private boolean isAbstract = false; private boolean isInnerClass = false; private boolean isStatic = false; private boolean isAnnotated = false; public Optional<String> getJerseyClassName() { if (isJerseyClass()) { return Optional.of(getClassName()); } else { return Optional.empty(); } } public boolean isJerseyClass() { return isAnnotated && isPublic && !isAbstract && (!isInnerClass || isStatic); } public String getClassName() { assert (className != null); return org.objectweb.asm.Type.getObjectType(className).getClassName(); } @Override public void visit(int version, int access, String name, String signature, String superName, String[] interfaces) { isPublic = isPublic(access); className = name; isAbstract = isAbstract(access); } @Override public void visitInnerClass(String name, String outerName, String innerName, int access) { assert (className != null); if (name.equals(className)) { isInnerClass = true; isStatic = isStatic(access); } } @Override public AnnotationVisitor visitAnnotation(String desc, boolean visible) { isAnnotated |= annotationClassDescriptors.contains(desc); return null; } private static Set<String> annotationClassDescriptors = new HashSet<>(); static { annotationClassDescriptors.add(Type.getDescriptor(Path.class)); annotationClassDescriptors.add(Type.getDescriptor(Provider.class)); } private static boolean isPublic(int access) { return isSet(Opcodes.ACC_PUBLIC, access); } private static boolean isStatic(int access) { return isSet(Opcodes.ACC_STATIC, access); } private static boolean isAbstract(int access) { return isSet(Opcodes.ACC_ABSTRACT, access); } private static boolean isSet(int bits, int access) { return (access & bits) == bits; } public static ResourceOrProviderClassVisitor visit(ClassReader classReader) { ResourceOrProviderClassVisitor visitor = new ResourceOrProviderClassVisitor(); classReader.accept(visitor, 
ClassReader.SKIP_DEBUG | ClassReader.SKIP_CODE | ClassReader.SKIP_FRAMES); return visitor; } }
class ResourceOrProviderClassVisitor extends ClassVisitor { private String className = null; private boolean isPublic = false; private boolean isAbstract = false; private boolean isInnerClass = false; private boolean isStatic = false; private boolean isAnnotated = false; public Optional<String> getJerseyClassName() { if (isJerseyClass()) { return Optional.of(getClassName()); } else { return Optional.empty(); } } public boolean isJerseyClass() { return isAnnotated && isPublic && !isAbstract && (!isInnerClass || isStatic); } public String getClassName() { assert (className != null); return org.objectweb.asm.Type.getObjectType(className).getClassName(); } @Override public void visit(int version, int access, String name, String signature, String superName, String[] interfaces) { isPublic = isPublic(access); className = name; isAbstract = isAbstract(access); } @Override public void visitInnerClass(String name, String outerName, String innerName, int access) { assert (className != null); if (name.equals(className)) { isInnerClass = true; isStatic = isStatic(access); } } @Override public AnnotationVisitor visitAnnotation(String desc, boolean visible) { isAnnotated |= annotationClassDescriptors.contains(desc); return null; } private static Set<String> annotationClassDescriptors = new HashSet<>(); static { annotationClassDescriptors.add(Type.getDescriptor(Path.class)); annotationClassDescriptors.add(Type.getDescriptor(Provider.class)); } private static boolean isPublic(int access) { return isSet(Opcodes.ACC_PUBLIC, access); } private static boolean isStatic(int access) { return isSet(Opcodes.ACC_STATIC, access); } private static boolean isAbstract(int access) { return isSet(Opcodes.ACC_ABSTRACT, access); } private static boolean isSet(int bits, int access) { return (access & bits) == bits; } public static ResourceOrProviderClassVisitor visit(ClassReader classReader) { ResourceOrProviderClassVisitor visitor = new ResourceOrProviderClassVisitor(); classReader.accept(visitor, 
ClassReader.SKIP_DEBUG | ClassReader.SKIP_CODE | ClassReader.SKIP_FRAMES); return visitor; } }
Use `ConfigserverUtil` ?
public void toApplicationInstance() throws Exception { when(statusProvider.getStatus(any(), any(), any(), any())).thenReturn(ServiceStatus.NOT_CHECKED); ConfigserverConfig config = new ConfigserverConfig( new ConfigserverConfig.Builder() .hostedVespa(true) .zookeeperserver(new ConfigserverConfig.Zookeeperserver.Builder().hostname(configServer1).port(1)) .zookeeperserver(new ConfigserverConfig.Zookeeperserver.Builder().hostname(configServer2).port(2)) .zookeeperserver(new ConfigserverConfig.Zookeeperserver.Builder().hostname(configServer3).port(3))); DuperModel duperModel = new DuperModel(config); SuperModel superModel = mock(SuperModel.class); ApplicationInfo superModelApplicationInfo = mock(ApplicationInfo.class); when(superModel.getAllApplicationInfos()).thenReturn(Collections.singletonList(superModelApplicationInfo)); List<ApplicationInfo> applicationInfos = duperModel.getApplicationInfos(superModel); assertEquals(2, applicationInfos.size()); assertEquals(ConfigServerApplication.CONFIG_SERVER_APPLICATION.getApplicationId(), applicationInfos.get(0).getApplicationId()); assertSame(superModelApplicationInfo, applicationInfos.get(1)); }
ConfigserverConfig config = new ConfigserverConfig(
public void toApplicationInstance() { when(statusProvider.getStatus(any(), any(), any(), any())).thenReturn(ServiceStatus.NOT_CHECKED); ConfigserverConfig config = ConfigserverUtil.createExampleConfigserverConfig(true); DuperModel duperModel = new DuperModel(config); SuperModel superModel = mock(SuperModel.class); ApplicationInfo superModelApplicationInfo = mock(ApplicationInfo.class); when(superModel.getAllApplicationInfos()).thenReturn(Collections.singletonList(superModelApplicationInfo)); List<ApplicationInfo> applicationInfos = duperModel.getApplicationInfos(superModel); assertEquals(2, applicationInfos.size()); assertEquals(ConfigServerApplication.CONFIG_SERVER_APPLICATION.getApplicationId(), applicationInfos.get(0).getApplicationId()); assertSame(superModelApplicationInfo, applicationInfos.get(1)); }
class DuperModelTest { private static final String configServer1 = "cfg1.yahoo.com"; private static final String configServer2 = "cfg2.yahoo.com"; private static final String configServer3 = "cfg3.yahoo.com"; private static final List<String> configServerList = Stream.of( configServer1, configServer2, configServer3).collect(Collectors.toList()); private final ServiceStatusProvider statusProvider = mock(ServiceStatusProvider.class); @Test }
class DuperModelTest { private final ServiceStatusProvider statusProvider = mock(ServiceStatusProvider.class); @Test }
Done
public void toApplicationInstance() throws Exception { when(statusProvider.getStatus(any(), any(), any(), any())).thenReturn(ServiceStatus.NOT_CHECKED); ConfigserverConfig config = new ConfigserverConfig( new ConfigserverConfig.Builder() .hostedVespa(true) .zookeeperserver(new ConfigserverConfig.Zookeeperserver.Builder().hostname(configServer1).port(1)) .zookeeperserver(new ConfigserverConfig.Zookeeperserver.Builder().hostname(configServer2).port(2)) .zookeeperserver(new ConfigserverConfig.Zookeeperserver.Builder().hostname(configServer3).port(3))); DuperModel duperModel = new DuperModel(config); SuperModel superModel = mock(SuperModel.class); ApplicationInfo superModelApplicationInfo = mock(ApplicationInfo.class); when(superModel.getAllApplicationInfos()).thenReturn(Collections.singletonList(superModelApplicationInfo)); List<ApplicationInfo> applicationInfos = duperModel.getApplicationInfos(superModel); assertEquals(2, applicationInfos.size()); assertEquals(ConfigServerApplication.CONFIG_SERVER_APPLICATION.getApplicationId(), applicationInfos.get(0).getApplicationId()); assertSame(superModelApplicationInfo, applicationInfos.get(1)); }
ConfigserverConfig config = new ConfigserverConfig(
public void toApplicationInstance() { when(statusProvider.getStatus(any(), any(), any(), any())).thenReturn(ServiceStatus.NOT_CHECKED); ConfigserverConfig config = ConfigserverUtil.createExampleConfigserverConfig(true); DuperModel duperModel = new DuperModel(config); SuperModel superModel = mock(SuperModel.class); ApplicationInfo superModelApplicationInfo = mock(ApplicationInfo.class); when(superModel.getAllApplicationInfos()).thenReturn(Collections.singletonList(superModelApplicationInfo)); List<ApplicationInfo> applicationInfos = duperModel.getApplicationInfos(superModel); assertEquals(2, applicationInfos.size()); assertEquals(ConfigServerApplication.CONFIG_SERVER_APPLICATION.getApplicationId(), applicationInfos.get(0).getApplicationId()); assertSame(superModelApplicationInfo, applicationInfos.get(1)); }
class DuperModelTest { private static final String configServer1 = "cfg1.yahoo.com"; private static final String configServer2 = "cfg2.yahoo.com"; private static final String configServer3 = "cfg3.yahoo.com"; private static final List<String> configServerList = Stream.of( configServer1, configServer2, configServer3).collect(Collectors.toList()); private final ServiceStatusProvider statusProvider = mock(ServiceStatusProvider.class); @Test }
class DuperModelTest { private final ServiceStatusProvider statusProvider = mock(ServiceStatusProvider.class); @Test }
Woudn't it be better to throw an exception with some info instead of returning null, which will give a NullPointerException when the cache is used?
public CuratorDatabaseCache validCache() { return !expired() ? cache : null; }
return !expired() ? cache : null;
public CuratorDatabaseCache validCache() { if (expired()) { throw new IllegalStateException("The cache has generation " + cache.generation() + " while the root genration counter in zookeeper says " + generation + ". That is totally unacceptable and must be a sever programming error in my close vicinity."); } return cache; }
class CacheAndGeneration { public CacheAndGeneration(CuratorDatabaseCache cache, long generation) { this.cache = cache; this.generation = generation; } public boolean expired() { return generation != cache.generation(); } private CuratorDatabaseCache cache; private long generation; }
class CacheAndGeneration { public CacheAndGeneration(CuratorDatabaseCache cache, long generation) { this.cache = cache; this.generation = generation; } public boolean expired() { return generation != cache.generation(); } private CuratorDatabaseCache cache; private long generation; }
You are probably right. For a man born is the sixties, thinking that nothing really new has happened in computer science since mid seventies, learning to program with fortran, c, pascal and the early days of C++, throwing an exception is a very hard thing to do. But I guess I have to , not embrace, but at least accept some changes.
public CuratorDatabaseCache validCache() { return !expired() ? cache : null; }
return !expired() ? cache : null;
public CuratorDatabaseCache validCache() { if (expired()) { throw new IllegalStateException("The cache has generation " + cache.generation() + " while the root genration counter in zookeeper says " + generation + ". That is totally unacceptable and must be a sever programming error in my close vicinity."); } return cache; }
class CacheAndGeneration { public CacheAndGeneration(CuratorDatabaseCache cache, long generation) { this.cache = cache; this.generation = generation; } public boolean expired() { return generation != cache.generation(); } private CuratorDatabaseCache cache; private long generation; }
class CacheAndGeneration { public CacheAndGeneration(CuratorDatabaseCache cache, long generation) { this.cache = cache; this.generation = generation; } public boolean expired() { return generation != cache.generation(); } private CuratorDatabaseCache cache; private long generation; }
In C++ if is an error have no strategy for handling. Fx something that can not possibly happen. Getting a coredump is a lot better than throwing an exception that someone might catch and swallow. But coredumps are not an option in Java .....
public CuratorDatabaseCache validCache() { return !expired() ? cache : null; }
return !expired() ? cache : null;
public CuratorDatabaseCache validCache() { if (expired()) { throw new IllegalStateException("The cache has generation " + cache.generation() + " while the root genration counter in zookeeper says " + generation + ". That is totally unacceptable and must be a sever programming error in my close vicinity."); } return cache; }
class CacheAndGeneration { public CacheAndGeneration(CuratorDatabaseCache cache, long generation) { this.cache = cache; this.generation = generation; } public boolean expired() { return generation != cache.generation(); } private CuratorDatabaseCache cache; private long generation; }
class CacheAndGeneration { public CacheAndGeneration(CuratorDatabaseCache cache, long generation) { this.cache = cache; this.generation = generation; } public boolean expired() { return generation != cache.generation(); } private CuratorDatabaseCache cache; private long generation; }
Should we require that the hostname contains '.' if it already contains the highly inappropriate '_'?
private void checkName(HostSystem parent, String hostname) { try { Object address = java.net.InetAddress.getByName(hostname); } catch (UnknownHostException e) { deployLogger().log(Level.WARNING, "Unable to lookup IP address of host: " + hostname); } if (! hostname.contains(".")) { deployLogger().log(Level.WARNING, "Host named '" + hostname + "' may not receive any config " + "since it is not a canonical hostname"); } }
if (! hostname.contains(".")) {
private void checkName(HostSystem parent, String hostname) { try { Object address = java.net.InetAddress.getByName(hostname); } catch (UnknownHostException e) { deployLogger().log(Level.WARNING, "Unable to lookup IP address of host: " + hostname); } if (! hostname.contains(".")) { deployLogger().log(Level.WARNING, "Host named '" + hostname + "' may not receive any config " + "since it is not a canonical hostname"); } }
class Host extends AbstractConfigProducer<AbstractConfigProducer<?>> implements SentinelConfig.Producer, Comparable<Host> { private ConfigSentinel configSentinel = null; private final String hostname; private final boolean runsConfigServer; /** * Constructs a new Host instance. * * @param parent parent AbstractConfigProducer in the config model. * @param hostname hostname for this host. */ public Host(AbstractConfigProducer parent, String hostname) { this(parent, hostname, false); } private Host(AbstractConfigProducer parent, String hostname, boolean runsConfigServer) { super(parent, hostname); Objects.requireNonNull(hostname, "The host name of a host cannot be null"); this.runsConfigServer = runsConfigServer; this.hostname = hostname; if (parent instanceof HostSystem) checkName((HostSystem) parent, hostname); } public static Host createConfigServerHost(AbstractConfigProducer parent, String hostname) { return new Host(parent, hostname, true); } Host(AbstractConfigProducer parent) { super(parent, "testhost"); hostname = "testhost"; configSentinel = null; runsConfigServer = false; } public String getHostname() { return hostname; } public boolean runsConfigServer() { return runsConfigServer; } /** Returns the string representation of this Host object. */ public String toString() { return "host '" + getHostname() + "'"; } @Override public void writeFiles(File directory) throws IOException { } @Override public void getConfig(SentinelConfig.Builder builder) { if (configSentinel != null) { configSentinel.getConfig(builder); } } public void setConfigSentinel(ConfigSentinel configSentinel) { this.configSentinel = configSentinel; } @Override public int hashCode() { return hostname.hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! (other instanceof Host)) return false; return ((Host)other).hostname.equals(hostname); } @Override public int compareTo(Host other) { return this.hostname.compareTo(other.hostname); } }
class Host extends AbstractConfigProducer<AbstractConfigProducer<?>> implements SentinelConfig.Producer, Comparable<Host> { private ConfigSentinel configSentinel = null; private final String hostname; private final boolean runsConfigServer; /** * Constructs a new Host instance. * * @param parent parent AbstractConfigProducer in the config model. * @param hostname hostname for this host. */ public Host(AbstractConfigProducer parent, String hostname) { this(parent, hostname, false); } private Host(AbstractConfigProducer parent, String hostname, boolean runsConfigServer) { super(parent, hostname); Objects.requireNonNull(hostname, "The host name of a host cannot be null"); this.runsConfigServer = runsConfigServer; this.hostname = hostname; if (parent instanceof HostSystem) checkName((HostSystem) parent, hostname); } public static Host createConfigServerHost(AbstractConfigProducer parent, String hostname) { return new Host(parent, hostname, true); } Host(AbstractConfigProducer parent) { super(parent, "testhost"); hostname = "testhost"; configSentinel = null; runsConfigServer = false; } public String getHostname() { return hostname; } public boolean runsConfigServer() { return runsConfigServer; } /** Returns the string representation of this Host object. */ public String toString() { return "host '" + getHostname() + "'"; } @Override public void writeFiles(File directory) throws IOException { } @Override public void getConfig(SentinelConfig.Builder builder) { if (configSentinel != null) { configSentinel.getConfig(builder); } } public void setConfigSentinel(ConfigSentinel configSentinel) { this.configSentinel = configSentinel; } @Override public int hashCode() { return hostname.hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! (other instanceof Host)) return false; return ((Host)other).hostname.equals(hostname); } @Override public int compareTo(Host other) { return this.hostname.compareTo(other.hostname); } }
should give network frame size as parameter instead of 0 here
private SSLEngineResult sslEngineWrap(ByteBuffer src) throws IOException { SSLEngineResult result = sslEngine.wrap(src, wrapBuffer.getWritable(0)); if (result.getStatus() == Status.CLOSED) throw new ClosedChannelException(); return result; }
SSLEngineResult result = sslEngine.wrap(src, wrapBuffer.getWritable(0));
private SSLEngineResult sslEngineWrap(ByteBuffer src) throws IOException { SSLEngineResult result = sslEngine.wrap(src, wrapBuffer.getWritable(sessionPacketBufferSize)); if (result.getStatus() == Status.CLOSED) throw new ClosedChannelException(); return result; }
class TlsCryptoSocket implements CryptoSocket { private static final ByteBuffer NULL_BUFFER = ByteBuffer.allocate(0); private static final Logger log = Logger.getLogger(TlsCryptoSocket.class.getName()); private enum HandshakeState { NOT_STARTED, NEED_READ, NEED_WRITE, COMPLETED } private final SocketChannel channel; private final SSLEngine sslEngine; private final Buffer wrapBuffer; private final Buffer unwrapBuffer; private ByteBuffer handshakeDummyBuffer; private HandshakeState handshakeState; public TlsCryptoSocket(SocketChannel channel, SSLEngine sslEngine) { this.channel = channel; this.sslEngine = sslEngine; SSLSession nullSession = sslEngine.getSession(); this.wrapBuffer = new Buffer(nullSession.getPacketBufferSize() * 2); this.unwrapBuffer = new Buffer(nullSession.getPacketBufferSize() * 2); this.handshakeDummyBuffer = ByteBuffer.allocate(nullSession.getApplicationBufferSize()); this.handshakeState = HandshakeState.NOT_STARTED; } @Override public SocketChannel channel() { return channel; } @Override public HandshakeResult handshake() throws IOException { HandshakeState newHandshakeState = processHandshakeState(this.handshakeState); log.fine(() -> String.format("Handshake state '%s -> %s'", this.handshakeState, newHandshakeState)); this.handshakeState = newHandshakeState; return toHandshakeResult(newHandshakeState); } private HandshakeState processHandshakeState(HandshakeState state) throws IOException { switch (state) { case NOT_STARTED: sslEngine.beginHandshake(); break; case NEED_WRITE: channelWrite(); break; case NEED_READ: channelRead(); break; case COMPLETED: return HandshakeState.COMPLETED; default: throw unhandledStateException(state); } while (true) { switch (sslEngine.getHandshakeStatus()) { case NOT_HANDSHAKING: if (hasWrapBufferMoreData()) return HandshakeState.NEED_WRITE; sslEngine.setEnableSessionCreation(false); handshakeDummyBuffer = null; return HandshakeState.COMPLETED; case NEED_TASK: sslEngine.getDelegatedTask().run(); break; case 
NEED_UNWRAP: if (hasWrapBufferMoreData()) return HandshakeState.NEED_WRITE; if (!handshakeUnwrap()) return HandshakeState.NEED_READ; break; case NEED_WRAP: if (!handshakeWrap()) return HandshakeState.NEED_WRITE; break; default: throw new IllegalStateException("Unexpected handshake status: " + sslEngine.getHandshakeStatus()); } } } private static HandshakeResult toHandshakeResult(HandshakeState state) { switch (state) { case NEED_READ: return HandshakeResult.NEED_READ; case NEED_WRITE: return HandshakeResult.NEED_WRITE; case COMPLETED: return HandshakeResult.DONE; default: throw unhandledStateException(state); } } @Override public int getMinimumReadBufferSize() { return sslEngine.getSession().getApplicationBufferSize(); } @Override public int read(ByteBuffer dst) throws IOException { verifyHandshakeCompleted(); int bytesUnwrapped = applicationDataUnwrap(dst); if (bytesUnwrapped > 0) return bytesUnwrapped; int bytesRead = channelRead(); if (bytesRead == 0) return 0; return drain(dst); } @Override public int drain(ByteBuffer dst) throws IOException { verifyHandshakeCompleted(); int totalBytesUnwrapped = 0; int bytesUnwrapped; do { bytesUnwrapped = applicationDataUnwrap(dst); totalBytesUnwrapped += bytesUnwrapped; } while (bytesUnwrapped > 0); return totalBytesUnwrapped; } @Override public int write(ByteBuffer src) throws IOException { if (flush() == FlushResult.NEED_WRITE) return 0; int totalBytesWrapped = 0; while (src.hasRemaining()) { int bytesWrapped = applicationDataWrap(src); if (bytesWrapped == 0) break; totalBytesWrapped += bytesWrapped; } return totalBytesWrapped; } @Override public FlushResult flush() throws IOException { channelWrite(); return hasWrapBufferMoreData() ? 
FlushResult.NEED_WRITE : FlushResult.DONE; } private boolean handshakeWrap() throws IOException { SSLEngineResult result = sslEngineWrap(NULL_BUFFER); switch (result.getStatus()) { case OK: return true; case BUFFER_OVERFLOW: return false; default: throw unexpectedStatusException(result.getStatus()); } } private int applicationDataWrap(ByteBuffer src) throws IOException { SSLEngineResult result = sslEngineWrap(src); switch (result.getStatus()) { case OK: int bytesConsumed = result.bytesConsumed(); if (bytesConsumed == 0) throw new SSLException("Got handshake data in application data wrap"); return bytesConsumed; case BUFFER_OVERFLOW: return 0; default: throw unexpectedStatusException(result.getStatus()); } } private boolean handshakeUnwrap() throws IOException { SSLEngineResult result = sslEngineUnwrap(handshakeDummyBuffer); switch (result.getStatus()) { case OK: if (result.bytesProduced() > 0) throw new SSLException("Got application data in handshake unwrap"); return true; case BUFFER_UNDERFLOW: return false; default: throw unexpectedStatusException(result.getStatus()); } } private int applicationDataUnwrap(ByteBuffer dst) throws IOException { SSLEngineResult result = sslEngineUnwrap(dst); switch (result.getStatus()) { case OK: int bytesProduced = result.bytesProduced(); if (bytesProduced == 0) throw new SSLException("Got handshake data in application data unwrap"); return bytesProduced; case BUFFER_OVERFLOW: case BUFFER_UNDERFLOW: return 0; default: throw unexpectedStatusException(result.getStatus()); } } private SSLEngineResult sslEngineUnwrap(ByteBuffer dst) throws IOException { SSLEngineResult result = sslEngine.unwrap(unwrapBuffer.getReadable(), dst); if (result.getStatus() == Status.CLOSED) throw new ClosedChannelException(); return result; } private int channelRead() throws IOException { int read = channel.read(unwrapBuffer.getWritable(0)); if (read == -1) throw new ClosedChannelException(); return read; } private int channelWrite() throws IOException { 
return channel.write(wrapBuffer.getReadable()); } private static IllegalStateException unhandledStateException(HandshakeState state) { return new IllegalStateException("Unhandled state: " + state); } private static IllegalStateException unexpectedStatusException(Status status) { return new IllegalStateException("Unexpected status: " + status); } private void verifyHandshakeCompleted() throws SSLException { if (handshakeState != HandshakeState.COMPLETED) throw new SSLException("Handshake not completed: handshakeState=" + handshakeState); } private boolean hasWrapBufferMoreData() { return wrapBuffer.bytes() > 0; } }
class TlsCryptoSocket implements CryptoSocket { private static final ByteBuffer NULL_BUFFER = ByteBuffer.allocate(0); private static final Logger log = Logger.getLogger(TlsCryptoSocket.class.getName()); private enum HandshakeState { NOT_STARTED, NEED_READ, NEED_WRITE, COMPLETED } private final SocketChannel channel; private final SSLEngine sslEngine; private final Buffer wrapBuffer; private final Buffer unwrapBuffer; private int sessionPacketBufferSize; private int sessionApplicationBufferSize; private ByteBuffer handshakeDummyBuffer; private HandshakeState handshakeState; public TlsCryptoSocket(SocketChannel channel, SSLEngine sslEngine) { this.channel = channel; this.sslEngine = sslEngine; SSLSession nullSession = sslEngine.getSession(); this.wrapBuffer = new Buffer(nullSession.getPacketBufferSize() * 2); this.unwrapBuffer = new Buffer(nullSession.getPacketBufferSize() * 2); this.handshakeDummyBuffer = ByteBuffer.allocate(nullSession.getApplicationBufferSize()); this.handshakeState = HandshakeState.NOT_STARTED; } @Override public SocketChannel channel() { return channel; } @Override public HandshakeResult handshake() throws IOException { HandshakeState newHandshakeState = processHandshakeState(this.handshakeState); log.fine(() -> String.format("Handshake state '%s -> %s'", this.handshakeState, newHandshakeState)); this.handshakeState = newHandshakeState; return toHandshakeResult(newHandshakeState); } private HandshakeState processHandshakeState(HandshakeState state) throws IOException { switch (state) { case NOT_STARTED: sslEngine.beginHandshake(); break; case NEED_WRITE: channelWrite(); break; case NEED_READ: channelRead(); break; case COMPLETED: return HandshakeState.COMPLETED; default: throw unhandledStateException(state); } while (true) { switch (sslEngine.getHandshakeStatus()) { case NOT_HANDSHAKING: if (wrapBuffer.bytes() > 0) return HandshakeState.NEED_WRITE; sslEngine.setEnableSessionCreation(false); handshakeDummyBuffer = null; SSLSession session = 
sslEngine.getSession(); sessionApplicationBufferSize = session.getApplicationBufferSize(); sessionPacketBufferSize = session.getPacketBufferSize(); return HandshakeState.COMPLETED; case NEED_TASK: sslEngine.getDelegatedTask().run(); break; case NEED_UNWRAP: if (wrapBuffer.bytes() > 0) return HandshakeState.NEED_WRITE; if (!handshakeUnwrap()) return HandshakeState.NEED_READ; break; case NEED_WRAP: if (!handshakeWrap()) return HandshakeState.NEED_WRITE; break; default: throw new IllegalStateException("Unexpected handshake status: " + sslEngine.getHandshakeStatus()); } } } private static HandshakeResult toHandshakeResult(HandshakeState state) { switch (state) { case NEED_READ: return HandshakeResult.NEED_READ; case NEED_WRITE: return HandshakeResult.NEED_WRITE; case COMPLETED: return HandshakeResult.DONE; default: throw unhandledStateException(state); } } @Override public int getMinimumReadBufferSize() { return sessionApplicationBufferSize; } @Override public int read(ByteBuffer dst) throws IOException { verifyHandshakeCompleted(); int bytesUnwrapped = drain(dst); if (bytesUnwrapped > 0) return bytesUnwrapped; int bytesRead = channelRead(); if (bytesRead == 0) return 0; return drain(dst); } @Override public int drain(ByteBuffer dst) throws IOException { verifyHandshakeCompleted(); int totalBytesUnwrapped = 0; int bytesUnwrapped; do { bytesUnwrapped = applicationDataUnwrap(dst); totalBytesUnwrapped += bytesUnwrapped; } while (bytesUnwrapped > 0); return totalBytesUnwrapped; } @Override public int write(ByteBuffer src) throws IOException { if (flush() == FlushResult.NEED_WRITE) return 0; int totalBytesWrapped = 0; int bytesWrapped; do { bytesWrapped = applicationDataWrap(src); totalBytesWrapped += bytesWrapped; } while (bytesWrapped > 0 && wrapBuffer.bytes() < sessionPacketBufferSize); return totalBytesWrapped; } @Override public FlushResult flush() throws IOException { channelWrite(); return wrapBuffer.bytes() > 0 ? 
FlushResult.NEED_WRITE : FlushResult.DONE; } private boolean handshakeWrap() throws IOException { SSLEngineResult result = sslEngineWrap(NULL_BUFFER); switch (result.getStatus()) { case OK: return true; case BUFFER_OVERFLOW: return false; default: throw unexpectedStatusException(result.getStatus()); } } private int applicationDataWrap(ByteBuffer src) throws IOException { SSLEngineResult result = sslEngineWrap(src); if (result.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) throw new SSLException("Renegotiation detected"); switch (result.getStatus()) { case OK: return result.bytesConsumed(); case BUFFER_OVERFLOW: return 0; default: throw unexpectedStatusException(result.getStatus()); } } private boolean handshakeUnwrap() throws IOException { SSLEngineResult result = sslEngineUnwrap(handshakeDummyBuffer); switch (result.getStatus()) { case OK: if (result.bytesProduced() > 0) throw new SSLException("Got application data in handshake unwrap"); return true; case BUFFER_UNDERFLOW: return false; default: throw unexpectedStatusException(result.getStatus()); } } private int applicationDataUnwrap(ByteBuffer dst) throws IOException { SSLEngineResult result = sslEngineUnwrap(dst); if (result.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) throw new SSLException("Renegotiation detected"); switch (result.getStatus()) { case OK: return result.bytesProduced(); case BUFFER_OVERFLOW: case BUFFER_UNDERFLOW: return 0; default: throw unexpectedStatusException(result.getStatus()); } } private SSLEngineResult sslEngineUnwrap(ByteBuffer dst) throws IOException { SSLEngineResult result = sslEngine.unwrap(unwrapBuffer.getReadable(), dst); if (result.getStatus() == Status.CLOSED) throw new ClosedChannelException(); return result; } private int channelRead() throws IOException { int read = channel.read(unwrapBuffer.getWritable(sessionPacketBufferSize)); if (read == -1) throw new ClosedChannelException(); return read; } private int channelWrite() throws IOException { return 
channel.write(wrapBuffer.getReadable()); } private static IllegalStateException unhandledStateException(HandshakeState state) { return new IllegalStateException("Unhandled state: " + state); } private static IllegalStateException unexpectedStatusException(Status status) { return new IllegalStateException("Unexpected status: " + status); } private void verifyHandshakeCompleted() throws SSLException { if (handshakeState != HandshakeState.COMPLETED) throw new SSLException("Handshake not completed: handshakeState=" + handshakeState); } }
since vespa-detect-hostname runs SocketAddress::normalize() it's highly expected that hostname will get a domain suffix, even when it contains "_" or is otherwise abnormal.
private void checkName(HostSystem parent, String hostname) { try { Object address = java.net.InetAddress.getByName(hostname); } catch (UnknownHostException e) { deployLogger().log(Level.WARNING, "Unable to lookup IP address of host: " + hostname); } if (! hostname.contains(".")) { deployLogger().log(Level.WARNING, "Host named '" + hostname + "' may not receive any config " + "since it is not a canonical hostname"); } }
if (! hostname.contains(".")) {
private void checkName(HostSystem parent, String hostname) { try { Object address = java.net.InetAddress.getByName(hostname); } catch (UnknownHostException e) { deployLogger().log(Level.WARNING, "Unable to lookup IP address of host: " + hostname); } if (! hostname.contains(".")) { deployLogger().log(Level.WARNING, "Host named '" + hostname + "' may not receive any config " + "since it is not a canonical hostname"); } }
class Host extends AbstractConfigProducer<AbstractConfigProducer<?>> implements SentinelConfig.Producer, Comparable<Host> { private ConfigSentinel configSentinel = null; private final String hostname; private final boolean runsConfigServer; /** * Constructs a new Host instance. * * @param parent parent AbstractConfigProducer in the config model. * @param hostname hostname for this host. */ public Host(AbstractConfigProducer parent, String hostname) { this(parent, hostname, false); } private Host(AbstractConfigProducer parent, String hostname, boolean runsConfigServer) { super(parent, hostname); Objects.requireNonNull(hostname, "The host name of a host cannot be null"); this.runsConfigServer = runsConfigServer; this.hostname = hostname; if (parent instanceof HostSystem) checkName((HostSystem) parent, hostname); } public static Host createConfigServerHost(AbstractConfigProducer parent, String hostname) { return new Host(parent, hostname, true); } Host(AbstractConfigProducer parent) { super(parent, "testhost"); hostname = "testhost"; configSentinel = null; runsConfigServer = false; } public String getHostname() { return hostname; } public boolean runsConfigServer() { return runsConfigServer; } /** Returns the string representation of this Host object. */ public String toString() { return "host '" + getHostname() + "'"; } @Override public void writeFiles(File directory) throws IOException { } @Override public void getConfig(SentinelConfig.Builder builder) { if (configSentinel != null) { configSentinel.getConfig(builder); } } public void setConfigSentinel(ConfigSentinel configSentinel) { this.configSentinel = configSentinel; } @Override public int hashCode() { return hostname.hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! (other instanceof Host)) return false; return ((Host)other).hostname.equals(hostname); } @Override public int compareTo(Host other) { return this.hostname.compareTo(other.hostname); } }
class Host extends AbstractConfigProducer<AbstractConfigProducer<?>> implements SentinelConfig.Producer, Comparable<Host> { private ConfigSentinel configSentinel = null; private final String hostname; private final boolean runsConfigServer; /** * Constructs a new Host instance. * * @param parent parent AbstractConfigProducer in the config model. * @param hostname hostname for this host. */ public Host(AbstractConfigProducer parent, String hostname) { this(parent, hostname, false); } private Host(AbstractConfigProducer parent, String hostname, boolean runsConfigServer) { super(parent, hostname); Objects.requireNonNull(hostname, "The host name of a host cannot be null"); this.runsConfigServer = runsConfigServer; this.hostname = hostname; if (parent instanceof HostSystem) checkName((HostSystem) parent, hostname); } public static Host createConfigServerHost(AbstractConfigProducer parent, String hostname) { return new Host(parent, hostname, true); } Host(AbstractConfigProducer parent) { super(parent, "testhost"); hostname = "testhost"; configSentinel = null; runsConfigServer = false; } public String getHostname() { return hostname; } public boolean runsConfigServer() { return runsConfigServer; } /** Returns the string representation of this Host object. */ public String toString() { return "host '" + getHostname() + "'"; } @Override public void writeFiles(File directory) throws IOException { } @Override public void getConfig(SentinelConfig.Builder builder) { if (configSentinel != null) { configSentinel.getConfig(builder); } } public void setConfigSentinel(ConfigSentinel configSentinel) { this.configSentinel = configSentinel; } @Override public int hashCode() { return hostname.hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! (other instanceof Host)) return false; return ((Host)other).hostname.equals(hostname); } @Override public int compareTo(Host other) { return this.hostname.compareTo(other.hostname); } }
I would expect symbolic container names with _ to be considered canonical by the docker dns proxy. I have not tested this, so I do not know.
private void checkName(HostSystem parent, String hostname) { try { Object address = java.net.InetAddress.getByName(hostname); } catch (UnknownHostException e) { deployLogger().log(Level.WARNING, "Unable to lookup IP address of host: " + hostname); } if (! hostname.contains(".")) { deployLogger().log(Level.WARNING, "Host named '" + hostname + "' may not receive any config " + "since it is not a canonical hostname"); } }
if (! hostname.contains(".")) {
private void checkName(HostSystem parent, String hostname) { try { Object address = java.net.InetAddress.getByName(hostname); } catch (UnknownHostException e) { deployLogger().log(Level.WARNING, "Unable to lookup IP address of host: " + hostname); } if (! hostname.contains(".")) { deployLogger().log(Level.WARNING, "Host named '" + hostname + "' may not receive any config " + "since it is not a canonical hostname"); } }
class Host extends AbstractConfigProducer<AbstractConfigProducer<?>> implements SentinelConfig.Producer, Comparable<Host> { private ConfigSentinel configSentinel = null; private final String hostname; private final boolean runsConfigServer; /** * Constructs a new Host instance. * * @param parent parent AbstractConfigProducer in the config model. * @param hostname hostname for this host. */ public Host(AbstractConfigProducer parent, String hostname) { this(parent, hostname, false); } private Host(AbstractConfigProducer parent, String hostname, boolean runsConfigServer) { super(parent, hostname); Objects.requireNonNull(hostname, "The host name of a host cannot be null"); this.runsConfigServer = runsConfigServer; this.hostname = hostname; if (parent instanceof HostSystem) checkName((HostSystem) parent, hostname); } public static Host createConfigServerHost(AbstractConfigProducer parent, String hostname) { return new Host(parent, hostname, true); } Host(AbstractConfigProducer parent) { super(parent, "testhost"); hostname = "testhost"; configSentinel = null; runsConfigServer = false; } public String getHostname() { return hostname; } public boolean runsConfigServer() { return runsConfigServer; } /** Returns the string representation of this Host object. */ public String toString() { return "host '" + getHostname() + "'"; } @Override public void writeFiles(File directory) throws IOException { } @Override public void getConfig(SentinelConfig.Builder builder) { if (configSentinel != null) { configSentinel.getConfig(builder); } } public void setConfigSentinel(ConfigSentinel configSentinel) { this.configSentinel = configSentinel; } @Override public int hashCode() { return hostname.hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! (other instanceof Host)) return false; return ((Host)other).hostname.equals(hostname); } @Override public int compareTo(Host other) { return this.hostname.compareTo(other.hostname); } }
class Host extends AbstractConfigProducer<AbstractConfigProducer<?>> implements SentinelConfig.Producer, Comparable<Host> { private ConfigSentinel configSentinel = null; private final String hostname; private final boolean runsConfigServer; /** * Constructs a new Host instance. * * @param parent parent AbstractConfigProducer in the config model. * @param hostname hostname for this host. */ public Host(AbstractConfigProducer parent, String hostname) { this(parent, hostname, false); } private Host(AbstractConfigProducer parent, String hostname, boolean runsConfigServer) { super(parent, hostname); Objects.requireNonNull(hostname, "The host name of a host cannot be null"); this.runsConfigServer = runsConfigServer; this.hostname = hostname; if (parent instanceof HostSystem) checkName((HostSystem) parent, hostname); } public static Host createConfigServerHost(AbstractConfigProducer parent, String hostname) { return new Host(parent, hostname, true); } Host(AbstractConfigProducer parent) { super(parent, "testhost"); hostname = "testhost"; configSentinel = null; runsConfigServer = false; } public String getHostname() { return hostname; } public boolean runsConfigServer() { return runsConfigServer; } /** Returns the string representation of this Host object. */ public String toString() { return "host '" + getHostname() + "'"; } @Override public void writeFiles(File directory) throws IOException { } @Override public void getConfig(SentinelConfig.Builder builder) { if (configSentinel != null) { configSentinel.getConfig(builder); } } public void setConfigSentinel(ConfigSentinel configSentinel) { this.configSentinel = configSentinel; } @Override public int hashCode() { return hostname.hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! (other instanceof Host)) return false; return ((Host)other).hostname.equals(hostname); } @Override public int compareTo(Host other) { return this.hostname.compareTo(other.hostname); } }
Consider triggering a better error message if a malformed header argument is passed. Right now it's going to be an `ArrayIndexOutOfBoundsException`
SessionParams createSessionParams(boolean useJson) { final int minThrottleValue = useDynamicThrottlingArg ? 10 : 0; ConnectionParams.Builder connectionParamsBuilder = new ConnectionParams.Builder(); for (String header : headers) { String[] nameAndValue = header.split(":"); connectionParamsBuilder.addHeader(nameAndValue[0].trim(), nameAndValue[1].trim()); } SessionParams.Builder builder = new SessionParams.Builder() .setFeedParams( new FeedParams.Builder() .setDataFormat(useJson ? FeedParams.DataFormat.JSON_UTF8 : FeedParams.DataFormat.XML_UTF8) .setRoute(routeArg) .setMaxInFlightRequests(maxPendingOperationCountArg) .setClientTimeout(timeoutArg, TimeUnit.SECONDS) .setServerTimeout(timeoutArg, TimeUnit.SECONDS) .setLocalQueueTimeOut(timeoutArg * 1000) .setPriority(priorityArg) .setMaxChunkSizeBytes(maxChunkSizeBytes) .build() ) .setConnectionParams( connectionParamsBuilder .setHostnameVerifier(insecure ? NoopHostnameVerifier.INSTANCE : SSLConnectionSocketFactory.getDefaultHostnameVerifier()) .setNumPersistentConnectionsPerEndpoint(16) .setEnableV3Protocol(! enableV2Protocol) .setUseCompression(useCompressionArg) .setMaxRetries(noRetryArg ? 0 : 100) .setMinTimeBetweenRetries(retrydelayArg, TimeUnit.SECONDS) .setDryRun(validateArg) .setTraceLevel(traceArg) .setTraceEveryXOperation(traceEveryXOperation) .setPrintTraceToStdErr(traceArg > 0) .setNumPersistentConnectionsPerEndpoint(numPersistentConnectionsPerEndpoint) .build() ) .setThrottlerMinSize(minThrottleValue) .setClientQueueSize(maxPendingOperationCountArg); Iterable<String> hosts = Splitter.on(',').trimResults().split(hostArg); for (String host : hosts) { builder.addCluster(new Cluster.Builder() .addEndpoint(Endpoint.create(host, portArg, useTls)) .build()); } return builder.build(); }
connectionParamsBuilder.addHeader(nameAndValue[0].trim(), nameAndValue[1].trim());
SessionParams createSessionParams(boolean useJson) { final int minThrottleValue = useDynamicThrottlingArg ? 10 : 0; ConnectionParams.Builder connectionParamsBuilder = new ConnectionParams.Builder(); parsedHeaders.forEach(header -> connectionParamsBuilder.addHeader(header.getName(), header.getValue())); SessionParams.Builder builder = new SessionParams.Builder() .setFeedParams( new FeedParams.Builder() .setDataFormat(useJson ? FeedParams.DataFormat.JSON_UTF8 : FeedParams.DataFormat.XML_UTF8) .setRoute(routeArg) .setMaxInFlightRequests(maxPendingOperationCountArg) .setClientTimeout(timeoutArg, TimeUnit.SECONDS) .setServerTimeout(timeoutArg, TimeUnit.SECONDS) .setLocalQueueTimeOut(timeoutArg * 1000) .setPriority(priorityArg) .setMaxChunkSizeBytes(maxChunkSizeBytes) .build() ) .setConnectionParams( connectionParamsBuilder .setHostnameVerifier(insecure ? NoopHostnameVerifier.INSTANCE : SSLConnectionSocketFactory.getDefaultHostnameVerifier()) .setNumPersistentConnectionsPerEndpoint(16) .setEnableV3Protocol(! enableV2Protocol) .setUseCompression(useCompressionArg) .setMaxRetries(noRetryArg ? 0 : 100) .setMinTimeBetweenRetries(retrydelayArg, TimeUnit.SECONDS) .setDryRun(validateArg) .setTraceLevel(traceArg) .setTraceEveryXOperation(traceEveryXOperation) .setPrintTraceToStdErr(traceArg > 0) .setNumPersistentConnectionsPerEndpoint(numPersistentConnectionsPerEndpoint) .build() ) .setThrottlerMinSize(minThrottleValue) .setClientQueueSize(maxPendingOperationCountArg); Iterable<String> hosts = Splitter.on(',').trimResults().split(hostArg); for (String host : hosts) { builder.addCluster(new Cluster.Builder() .addEndpoint(Endpoint.create(host, portArg, useTls)) .build()); } return builder.build(); }
/**
 * Airline-based command line option definitions and parsing for the feeding
 * tool. {@code build(String[])} is the only entry point: it parses the
 * arguments, prints usage/errors to stderr on failure, and returns null when
 * the caller should exit.
 *
 * NOTE(review): several option descriptions contain typos ("sepparate",
 * "endoint.)", "How often to to tracing") and the field "verboaseArg" is
 * misspelled. These are user-visible help strings, so they are deliberately
 * left untouched here — confirm before changing them.
 */
class CommandLineArguments { /** * Creates a CommandLineArguments instance and populates it with data. * * @param args array of arguments. * @return null on failure or if help option is set to true. */ static CommandLineArguments build(String[] args) { final CommandLineArguments cmdArgs; try { cmdArgs = SingleCommand.singleCommand(CommandLineArguments.class).parse(args); } catch (Exception e) { System.err.println(e.getMessage()); System.err.println("Use --help to show usage.\n"); return null; } if (cmdArgs.helpOption.showHelpIfRequested()) { return null; } if (cmdArgs.hostArg == null) { System.err.println("'--host' not set."); return null; } if (cmdArgs.priorityArg != null && ! checkPriorityFlag(cmdArgs.priorityArg)) { return null; } return cmdArgs; } private static boolean checkPriorityFlag(String priorityArg) { switch (priorityArg) { case "HIGHEST": case "VERY_HIGH": case "HIGH_1": case "HIGH_2": case "HIGH_3": case "NORMAL_1": case "NORMAL_2": case "NORMAL_3": case "NORMAL_4": case "NORMAL_5": case "NORMAL_6": case "LOW_1": case "LOW_2": case "LOW_3": case "VERY_LOW": case "LOWEST": return true; default: System.err.println("Not valid value for priority. 
Allowed values are HIGHEST, VERY_HIGH, HIGH_[1-3], " + "NORMAL_[1-6], LOW_[1-3], VERY_LOW, and LOWEST."); return false; } } @Inject private HelpOption helpOption; @Option(name = {"--useV3Protocol"}, description = "Not used anymore, see useV2Protocol.") private boolean notUsedBoolean = true; @Option(name = {"--useV2Protocol"}, description = "Use old V2 protocol to gateway.") private boolean enableV2Protocol = false; @Option(name = {"--file"}, description = "The name of the input file to read.") private String fileArg = null; @Option(name = {"--add-root-element-to-xml"}, description = "Add <vespafeed> tag to XML document, makes it easier to feed raw data.") private boolean addRootElementToXml = false; @Option(name = {"--route"}, description = "(=default)The route to send the data to.") private String routeArg = "default"; @Option(name = {"--host"}, description = "The host(s) for the gateway. If using several, use comma to sepparate them.") private String hostArg; @Option(name = {"--port"}, description = "The port for the host of the gateway.") private int portArg = 4080; @Option(name = {"--timeout"}, description = "(=180) The time (in seconds) allowed for sending operations.") private long timeoutArg = 180; @Option(name = {"--useCompression"}, description = "Use compression over network.") private boolean useCompressionArg = false; @Option(name = {"--useDynamicThrottling"}, description = "Try to maximize throughput by using dynamic throttling.") private boolean useDynamicThrottlingArg = false; @Option(name = {"--maxpending"}, description = "The maximum number of operations that are allowed " + "to be pending at any given time.") private int maxPendingOperationCountArg = 10000; @Option(name = {"--debugport"}, description = "Deprecated, not used.") private int debugportArg = 9988; @Option(name = {"-v", "--verbose"}, description = "Enable verbose output of progress.") private boolean verboaseArg = false; @Option(name = {"--noretry"}, description = "Turns off retries of 
recoverable failures..") private boolean noRetryArg = false; @Option(name = {"--retrydelay"}, description = "The time (in seconds) to wait between retries of a failed operation.") private int retrydelayArg = 1; @Option(name = {"--trace"}, description = "(=0 (=off)) The trace level of network traffic.") private int traceArg = 0; @Option(name = {"--printTraceEveryXOperation"}, description = "(=1) How often to to tracing.") private int traceEveryXOperation = 1; @Option(name = {"--validate"}, description = "Run validation tool on input files instead of feeding them.") private boolean validateArg = false; @Option(name = {"--priority"}, description = "Specify priority of sent messages, see documentation ") private String priorityArg = null; @Option(name = {"--numPersistentConnectionsPerEndpoint"}, description = "How many tcp connections to establish per endoint.)") private int numPersistentConnectionsPerEndpoint = 16; @Option(name = {"--maxChunkSizeBytes"}, description = "How much data to send to gateway in each message.") private int maxChunkSizeBytes = 20 * 1024; @Option(name = {"--whenVerboseEnabledPrintMessageForEveryXDocuments"}, description = "How often to print verbose message.)") private int whenVerboseEnabledPrintMessageForEveryXDocuments = 1000; @Option(name = {"--useTls"}, description = "Use TLS when connecting to endpoint") private boolean useTls = false; @Option(name = {"--insecure"}, description = "Skip hostname verification when using TLS") private boolean insecure = false; @Option(name = {"--header"}, description = "Add http header to every request. Header must have the format '<Name>: <Value>'. 
Use this parameter multiple times for multiple headers") private List<String> headers = new ArrayList<>(); int getWhenVerboseEnabledPrintMessageForEveryXDocuments() { return whenVerboseEnabledPrintMessageForEveryXDocuments; } public String getFile() { return fileArg; }; public boolean getVerbose() { return verboaseArg; } public boolean getAddRootElementToXml() { return addRootElementToXml; } }
/**
 * Airline-based command line option definitions and parsing for the feeding
 * tool. This revision eagerly validates every --header argument in
 * {@code build()} via {@code BasicLineParser.parseHeader}, storing the results
 * in {@code parsedHeaders} so later consumers never see malformed input; an
 * invalid header is reported to stderr and aborts parsing (returns null).
 *
 * NOTE(review): option-description typos ("sepparate", "endoint.)",
 * "How often to to tracing") and the misspelled field "verboaseArg" are
 * left untouched here — confirm before changing user-visible help text.
 */
class CommandLineArguments { /** * Creates a CommandLineArguments instance and populates it with data. * * @param args array of arguments. * @return null on failure or if help option is set to true. */ static CommandLineArguments build(String[] args) { final CommandLineArguments cmdArgs; try { cmdArgs = SingleCommand.singleCommand(CommandLineArguments.class).parse(args); } catch (Exception e) { System.err.println(e.getMessage()); System.err.println("Use --help to show usage.\n"); return null; } if (cmdArgs.helpOption.showHelpIfRequested()) { return null; } if (cmdArgs.hostArg == null) { System.err.println("'--host' not set."); return null; } if (cmdArgs.priorityArg != null && ! checkPriorityFlag(cmdArgs.priorityArg)) { return null; } for (String header : cmdArgs.headers) { try { cmdArgs.parsedHeaders.add(BasicLineParser.parseHeader(header, null)); } catch (ParseException e) { System.err.printf("Invalid header: '%s' (%s)%n", header, e.getMessage()); return null; } } return cmdArgs; } private static boolean checkPriorityFlag(String priorityArg) { switch (priorityArg) { case "HIGHEST": case "VERY_HIGH": case "HIGH_1": case "HIGH_2": case "HIGH_3": case "NORMAL_1": case "NORMAL_2": case "NORMAL_3": case "NORMAL_4": case "NORMAL_5": case "NORMAL_6": case "LOW_1": case "LOW_2": case "LOW_3": case "VERY_LOW": case "LOWEST": return true; default: System.err.println("Not valid value for priority. 
Allowed values are HIGHEST, VERY_HIGH, HIGH_[1-3], " + "NORMAL_[1-6], LOW_[1-3], VERY_LOW, and LOWEST."); return false; } } @Inject private HelpOption helpOption; @Option(name = {"--useV3Protocol"}, description = "Not used anymore, see useV2Protocol.") private boolean notUsedBoolean = true; @Option(name = {"--useV2Protocol"}, description = "Use old V2 protocol to gateway.") private boolean enableV2Protocol = false; @Option(name = {"--file"}, description = "The name of the input file to read.") private String fileArg = null; @Option(name = {"--add-root-element-to-xml"}, description = "Add <vespafeed> tag to XML document, makes it easier to feed raw data.") private boolean addRootElementToXml = false; @Option(name = {"--route"}, description = "(=default)The route to send the data to.") private String routeArg = "default"; @Option(name = {"--host"}, description = "The host(s) for the gateway. If using several, use comma to sepparate them.") private String hostArg; @Option(name = {"--port"}, description = "The port for the host of the gateway.") private int portArg = 4080; @Option(name = {"--timeout"}, description = "(=180) The time (in seconds) allowed for sending operations.") private long timeoutArg = 180; @Option(name = {"--useCompression"}, description = "Use compression over network.") private boolean useCompressionArg = false; @Option(name = {"--useDynamicThrottling"}, description = "Try to maximize throughput by using dynamic throttling.") private boolean useDynamicThrottlingArg = false; @Option(name = {"--maxpending"}, description = "The maximum number of operations that are allowed " + "to be pending at any given time.") private int maxPendingOperationCountArg = 10000; @Option(name = {"--debugport"}, description = "Deprecated, not used.") private int debugportArg = 9988; @Option(name = {"-v", "--verbose"}, description = "Enable verbose output of progress.") private boolean verboaseArg = false; @Option(name = {"--noretry"}, description = "Turns off retries of 
recoverable failures..") private boolean noRetryArg = false; @Option(name = {"--retrydelay"}, description = "The time (in seconds) to wait between retries of a failed operation.") private int retrydelayArg = 1; @Option(name = {"--trace"}, description = "(=0 (=off)) The trace level of network traffic.") private int traceArg = 0; @Option(name = {"--printTraceEveryXOperation"}, description = "(=1) How often to to tracing.") private int traceEveryXOperation = 1; @Option(name = {"--validate"}, description = "Run validation tool on input files instead of feeding them.") private boolean validateArg = false; @Option(name = {"--priority"}, description = "Specify priority of sent messages, see documentation ") private String priorityArg = null; @Option(name = {"--numPersistentConnectionsPerEndpoint"}, description = "How many tcp connections to establish per endoint.)") private int numPersistentConnectionsPerEndpoint = 16; @Option(name = {"--maxChunkSizeBytes"}, description = "How much data to send to gateway in each message.") private int maxChunkSizeBytes = 20 * 1024; @Option(name = {"--whenVerboseEnabledPrintMessageForEveryXDocuments"}, description = "How often to print verbose message.)") private int whenVerboseEnabledPrintMessageForEveryXDocuments = 1000; @Option(name = {"--useTls"}, description = "Use TLS when connecting to endpoint") private boolean useTls = false; @Option(name = {"--insecure"}, description = "Skip hostname verification when using TLS") private boolean insecure = false; @Option(name = {"--header"}, description = "Add http header to every request. Header must have the format '<Name>: <Value>'. 
Use this parameter multiple times for multiple headers") private List<String> headers = new ArrayList<>(); private final List<Header> parsedHeaders = new ArrayList<>(); int getWhenVerboseEnabledPrintMessageForEveryXDocuments() { return whenVerboseEnabledPrintMessageForEveryXDocuments; } public String getFile() { return fileArg; }; public boolean getVerbose() { return verboaseArg; } public boolean getAddRootElementToXml() { return addRootElementToXml; } }
I'm not sure why the tests in this class use a `CountDownLatch` at all: if I understand the mock setup correctly, the lambda given to `mockHttpClientFactory` executes on the test thread, so there is no cross-thread signalling to wait for (and the latch usage isn't actually thread-safe anyway). Maybe things have changed over time, or the latch is just a roundabout way to fail the test if the lambda is never executed (which seems most likely) ¯\\\_(ツ)_/¯ An explicit mock expectation would express that intent more clearly, but since the current code _does_ work (though it's confusing), changing it is optional.
/**
 * Verifies that the "message" field of a JSON error body is included in the
 * ServerResponseException raised for a 401 response.
 *
 * The CountDownLatch was removed: the mocked HTTP execution runs synchronously
 * on the test thread, and the old {@code assertTrue(latch.await(...))} was
 * unreachable anyway — {@code expectedException} means {@code writeOperations}
 * throws before that line. The ExpectedException rule already fails the test
 * if the exception (and hence the mocked request) never happens.
 */
public void detailed_error_message_is_extracted_from_error_responses_with_json() throws IOException, ServerResponseException {
    String reasonPhrase = "Unauthorized";
    String errorMessage = "Invalid credentials";
    // The connection is expected to combine the HTTP reason phrase with the
    // JSON body's "message" field.
    expectedException.expect(ServerResponseException.class);
    expectedException.expectMessage(reasonPhrase + " - " + errorMessage);
    ApacheGatewayConnection.HttpClientFactory mockFactory =
            mockHttpClientFactory(post -> createErrorHttpResponse(401, reasonPhrase, errorMessage));
    ApacheGatewayConnection apacheGatewayConnection =
            new ApacheGatewayConnection(
                    Endpoint.create("hostname", 666, false),
                    new FeedParams.Builder().build(),
                    "",
                    new ConnectionParams.Builder().build(),
                    mockFactory,
                    "clientId");
    apacheGatewayConnection.connect();
    apacheGatewayConnection.handshake();
    apacheGatewayConnection.writeOperations(Collections.singletonList(createDoc("42", "content", true)));
}
assertTrue(verifyContentSentLatch.await(10, TimeUnit.SECONDS));
/**
 * A 401 response carrying a JSON error body must surface the body's "message"
 * field (combined with the reason phrase) in the thrown ServerResponseException.
 */
public void detailed_error_message_is_extracted_from_error_responses_with_json() throws IOException, ServerResponseException {
    final String reasonPhrase = "Unauthorized";
    final String errorMessage = "Invalid credentials";
    expectedException.expect(ServerResponseException.class);
    expectedException.expectMessage(reasonPhrase + " - " + errorMessage);
    ApacheGatewayConnection.HttpClientFactory factory = mockHttpClientFactory(
            post -> createErrorHttpResponse(401, reasonPhrase, errorMessage));
    ApacheGatewayConnection connection = new ApacheGatewayConnection(
            Endpoint.create("hostname", 666, false),
            new FeedParams.Builder().build(),
            "",
            new ConnectionParams.Builder().build(),
            factory,
            "clientId");
    connection.connect();
    connection.handshake();
    connection.writeOperations(Collections.singletonList(createDoc("42", "content", true)));
}
/**
 * Unit tests for ApacheGatewayConnection. Each test stubs the HttpClient via
 * mockHttpClientFactory so the per-test lambda receives the outgoing HttpPost
 * and returns a canned HttpResponse; the stubbed execute runs synchronously on
 * the test thread.
 *
 * NOTE(review): because the mock callbacks run on the test thread, the
 * CountDownLatch usage here only fails the test if a callback never runs —
 * there is no real cross-thread wait; confirm before relying on it.
 * NOTE(review): a stray @Test annotation sits directly before the private
 * static mockHttpClientFactory helper — presumably a leftover; verify and
 * remove (JUnit 4 rejects private/static test methods).
 */
class ApacheGatewayConnectionTest { @Rule public ExpectedException expectedException = ExpectedException.none(); @Test public void testProtocolV3() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().setDataFormat(FeedParams.DataFormat.JSON_UTF8).build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setEnableV3Protocol(true) .build(); final List<Document> documents = new ArrayList<>(); final CountDownLatch verifyContentSentLatch = new CountDownLatch(1); final String vespaDocContent ="Hello, I a JSON doc."; final String docId = "42"; final AtomicInteger requestsReceived = new AtomicInteger(0); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> { final Header clientIdHeader = post.getFirstHeader(Headers.CLIENT_ID); verifyContentSentLatch.countDown(); return httpResponse(clientIdHeader.getValue(), "3"); }); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, "clientId"); apacheGatewayConnection.connect(); apacheGatewayConnection.handshake(); documents.add(createDoc(docId, vespaDocContent, true)); apacheGatewayConnection.writeOperations(documents); assertTrue(verifyContentSentLatch.await(10, TimeUnit.SECONDS)); } @Test(expected=IllegalArgumentException.class) public void testServerReturnsBadSessionInV3() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().setDataFormat(FeedParams.DataFormat.JSON_UTF8).build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setEnableV3Protocol(true) .build(); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> httpResponse("Wrong Id from server", "3")); 
ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, "clientId"); apacheGatewayConnection.connect(); final List<Document> documents = new ArrayList<>(); apacheGatewayConnection.writeOperations(documents); } @Test(expected=RuntimeException.class) public void testBadConfigParameters() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().setDataFormat(FeedParams.DataFormat.JSON_UTF8).build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setEnableV3Protocol(true) .build(); final ApacheGatewayConnection.HttpClientFactory mockFactory = mock(ApacheGatewayConnection.HttpClientFactory.class); new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, null); } @Test public void testJsonDocumentHeader() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().setDataFormat(FeedParams.DataFormat.JSON_UTF8).build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setUseCompression(true) .build(); final List<Document> documents = new ArrayList<>(); final CountDownLatch verifyContentSentLatch = new CountDownLatch(1); final String vespaDocContent ="Hello, I a JSON doc."; final String docId = "42"; final AtomicInteger requestsReceived = new AtomicInteger(0); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> { final Header header = post.getFirstHeader(Headers.DATA_FORMAT); if (requestsReceived.incrementAndGet() == 1) { assert (header == null); return httpResponse("clientId", "3"); } assertNotNull(header); assertThat(header.getValue(), is(FeedParams.DataFormat.JSON_UTF8.name())); 
verifyContentSentLatch.countDown(); return httpResponse("clientId", "3"); }); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, "clientId"); apacheGatewayConnection.connect(); apacheGatewayConnection.handshake(); documents.add(createDoc(docId, vespaDocContent, true)); apacheGatewayConnection.writeOperations(documents); assertTrue(verifyContentSentLatch.await(10, TimeUnit.SECONDS)); } @Test public void testZipAndCreateEntity() throws IOException { final String testString = "Hello world"; InputStream stream = new ByteArrayInputStream(testString.getBytes(StandardCharsets.UTF_8)); InputStreamEntity inputStreamEntity = ApacheGatewayConnection.zipAndCreateEntity(stream); final String rawContent = TestUtils.zipStreamToString(inputStreamEntity.getContent()); assert(testString.equals(rawContent)); } /** * Mocks the HttpClient, and verifies that the compressed data is sent. */ @Test public void testCompressedWriteOperations() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setUseCompression(true) .build(); final List<Document> documents = new ArrayList<>(); final CountDownLatch verifyContentSentLatch = new CountDownLatch(1); final String vespaDocContent ="Hello, I am the document data."; final String docId = "42"; final Document doc = createDoc(docId, vespaDocContent, false); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> { final Header header = post.getFirstHeader("Content-Encoding"); if (header != null && header.getValue().equals("gzip")) { final String rawContent = TestUtils.zipStreamToString(post.getEntity().getContent()); final String vespaHeaderText = "<vespafeed>\n"; final String vespaFooterText = "</vespafeed>\n"; 
assertThat(rawContent, is( doc.getOperationId() + " 38\n" + vespaHeaderText + vespaDocContent + "\n" + vespaFooterText)); verifyContentSentLatch.countDown(); } return httpResponse("clientId", "3"); }); StatusLine statusLineMock = mock(StatusLine.class); when(statusLineMock.getStatusCode()).thenReturn(200); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, "clientId"); apacheGatewayConnection.connect(); apacheGatewayConnection.handshake(); documents.add(doc); apacheGatewayConnection.writeOperations(documents); assertTrue(verifyContentSentLatch.await(10, TimeUnit.SECONDS)); } @Test public void dynamic_headers_are_added_to_the_response() throws IOException, ServerResponseException, InterruptedException { ConnectionParams.HeaderProvider headerProvider = mock(ConnectionParams.HeaderProvider.class); when(headerProvider.getHeaderValue()) .thenReturn("v1") .thenReturn("v2") .thenReturn("v3"); ConnectionParams connectionParams = new ConnectionParams.Builder() .addDynamicHeader("foo", headerProvider) .build(); CountDownLatch verifyContentSentLatch = new CountDownLatch(1); AtomicInteger counter = new AtomicInteger(1); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> { Header[] fooHeader = post.getHeaders("foo"); assertEquals(1, fooHeader.length); assertEquals("foo", fooHeader[0].getName()); assertEquals("v" + counter.getAndIncrement(), fooHeader[0].getValue()); verifyContentSentLatch.countDown(); return httpResponse("clientId", "3"); }); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( Endpoint.create("hostname", 666, false), new FeedParams.Builder().build(), "", connectionParams, mockFactory, "clientId"); apacheGatewayConnection.connect(); apacheGatewayConnection.handshake(); List<Document> documents = new ArrayList<>(); documents.add(createDoc("42", "content", true)); 
apacheGatewayConnection.writeOperations(documents); apacheGatewayConnection.writeOperations(documents); assertTrue(verifyContentSentLatch.await(10, TimeUnit.SECONDS)); verify(headerProvider, times(3)).getHeaderValue(); } @Test private static ApacheGatewayConnection.HttpClientFactory mockHttpClientFactory(HttpExecuteMock httpExecuteMock) throws IOException { ApacheGatewayConnection.HttpClientFactory mockFactory = mock(ApacheGatewayConnection.HttpClientFactory.class); HttpClient httpClientMock = mock(HttpClient.class); when(mockFactory.createClient()).thenReturn(httpClientMock); stub(httpClientMock.execute(any())).toAnswer((Answer) invocation -> { Object[] args = invocation.getArguments(); HttpPost post = (HttpPost) args[0]; return httpExecuteMock.execute(post); }); return mockFactory; } @FunctionalInterface private interface HttpExecuteMock { HttpResponse execute(HttpPost httpPost) throws IOException; } private Document createDoc(final String docId, final String content, boolean useJson) throws IOException { return new Document(docId, content.getBytes(), null /* context */); } private void addMockedHeader( final HttpResponse httpResponseMock, final String name, final String value, HeaderElement[] elements) { final Header header = new Header() { @Override public String getName() { return name; } @Override public String getValue() { return value; } @Override public HeaderElement[] getElements() throws ParseException { return elements; } }; when(httpResponseMock.getFirstHeader(name)).thenReturn(header); } private HttpResponse httpResponse(String sessionIdInResult, String version) throws IOException { final HttpResponse httpResponseMock = mock(HttpResponse.class); StatusLine statusLineMock = mock(StatusLine.class); when(httpResponseMock.getStatusLine()).thenReturn(statusLineMock); when(statusLineMock.getStatusCode()).thenReturn(200); addMockedHeader(httpResponseMock, Headers.SESSION_ID, sessionIdInResult, null); addMockedHeader(httpResponseMock, Headers.VERSION, 
version, null); HeaderElement[] headerElements = new HeaderElement[1]; headerElements[0] = mock(HeaderElement.class); final HttpEntity httpEntityMock = mock(HttpEntity.class); when(httpResponseMock.getEntity()).thenReturn(httpEntityMock); final InputStream inputs = new ByteArrayInputStream("fake response data".getBytes()); when(httpEntityMock.getContent()).thenReturn(inputs); return httpResponseMock; } private static HttpResponse createErrorHttpResponse(int statusCode, String reasonPhrase, String message) throws IOException { HttpResponse response = mock(HttpResponse.class); StatusLine statusLine = mock(StatusLine.class); when(statusLine.getStatusCode()).thenReturn(statusCode); when(statusLine.getReasonPhrase()).thenReturn(reasonPhrase); when(response.getStatusLine()).thenReturn(statusLine); HttpEntity httpEntity = mock(HttpEntity.class); when(httpEntity.getContentType()).thenReturn(new BasicHeader("Content-Type", "application/json")); String json = String.format("{\"message\": \"%s\"}", message); when(httpEntity.getContent()).thenReturn(new ByteArrayInputStream(json.getBytes())); when(response.getEntity()).thenReturn(httpEntity); return response; } }
/**
 * Unit tests for ApacheGatewayConnection. Each test stubs the HttpClient via
 * mockHttpClientFactory so the per-test lambda receives the outgoing HttpPost
 * and returns a canned HttpResponse synchronously on the test thread. This
 * revision drops the CountDownLatch bookkeeping of the earlier version, since
 * the callbacks never run on another thread.
 *
 * NOTE(review): a stray @Test annotation still sits directly before the
 * private static mockHttpClientFactory helper — presumably a leftover;
 * verify and remove (JUnit 4 rejects private/static test methods).
 */
class ApacheGatewayConnectionTest { @Rule public ExpectedException expectedException = ExpectedException.none(); @Test public void testProtocolV3() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().setDataFormat(FeedParams.DataFormat.JSON_UTF8).build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setEnableV3Protocol(true) .build(); final List<Document> documents = new ArrayList<>(); final String vespaDocContent ="Hello, I a JSON doc."; final String docId = "42"; final AtomicInteger requestsReceived = new AtomicInteger(0); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> { final Header clientIdHeader = post.getFirstHeader(Headers.CLIENT_ID); return httpResponse(clientIdHeader.getValue(), "3"); }); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, "clientId"); apacheGatewayConnection.connect(); apacheGatewayConnection.handshake(); documents.add(createDoc(docId, vespaDocContent, true)); apacheGatewayConnection.writeOperations(documents); } @Test(expected=IllegalArgumentException.class) public void testServerReturnsBadSessionInV3() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().setDataFormat(FeedParams.DataFormat.JSON_UTF8).build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setEnableV3Protocol(true) .build(); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> httpResponse("Wrong Id from server", "3")); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, "clientId"); 
apacheGatewayConnection.connect(); final List<Document> documents = new ArrayList<>(); apacheGatewayConnection.writeOperations(documents); } @Test(expected=RuntimeException.class) public void testBadConfigParameters() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().setDataFormat(FeedParams.DataFormat.JSON_UTF8).build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setEnableV3Protocol(true) .build(); final ApacheGatewayConnection.HttpClientFactory mockFactory = mock(ApacheGatewayConnection.HttpClientFactory.class); new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, null); } @Test public void testJsonDocumentHeader() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().setDataFormat(FeedParams.DataFormat.JSON_UTF8).build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setUseCompression(true) .build(); final List<Document> documents = new ArrayList<>(); final String vespaDocContent ="Hello, I a JSON doc."; final String docId = "42"; final AtomicInteger requestsReceived = new AtomicInteger(0); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> { final Header header = post.getFirstHeader(Headers.DATA_FORMAT); if (requestsReceived.incrementAndGet() == 1) { assert (header == null); return httpResponse("clientId", "3"); } assertNotNull(header); assertThat(header.getValue(), is(FeedParams.DataFormat.JSON_UTF8.name())); return httpResponse("clientId", "3"); }); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, "clientId"); apacheGatewayConnection.connect(); 
apacheGatewayConnection.handshake(); documents.add(createDoc(docId, vespaDocContent, true)); apacheGatewayConnection.writeOperations(documents); } @Test public void testZipAndCreateEntity() throws IOException { final String testString = "Hello world"; InputStream stream = new ByteArrayInputStream(testString.getBytes(StandardCharsets.UTF_8)); InputStreamEntity inputStreamEntity = ApacheGatewayConnection.zipAndCreateEntity(stream); final String rawContent = TestUtils.zipStreamToString(inputStreamEntity.getContent()); assert(testString.equals(rawContent)); } /** * Mocks the HttpClient, and verifies that the compressed data is sent. */ @Test public void testCompressedWriteOperations() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setUseCompression(true) .build(); final List<Document> documents = new ArrayList<>(); final String vespaDocContent ="Hello, I am the document data."; final String docId = "42"; final Document doc = createDoc(docId, vespaDocContent, false); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> { final Header header = post.getFirstHeader("Content-Encoding"); if (header != null && header.getValue().equals("gzip")) { final String rawContent = TestUtils.zipStreamToString(post.getEntity().getContent()); final String vespaHeaderText = "<vespafeed>\n"; final String vespaFooterText = "</vespafeed>\n"; assertThat(rawContent, is( doc.getOperationId() + " 38\n" + vespaHeaderText + vespaDocContent + "\n" + vespaFooterText)); } return httpResponse("clientId", "3"); }); StatusLine statusLineMock = mock(StatusLine.class); when(statusLineMock.getStatusCode()).thenReturn(200); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, 
mockFactory, "clientId"); apacheGatewayConnection.connect(); apacheGatewayConnection.handshake(); documents.add(doc); apacheGatewayConnection.writeOperations(documents); } @Test public void dynamic_headers_are_added_to_the_response() throws IOException, ServerResponseException, InterruptedException { ConnectionParams.HeaderProvider headerProvider = mock(ConnectionParams.HeaderProvider.class); when(headerProvider.getHeaderValue()) .thenReturn("v1") .thenReturn("v2") .thenReturn("v3"); ConnectionParams connectionParams = new ConnectionParams.Builder() .addDynamicHeader("foo", headerProvider) .build(); AtomicInteger counter = new AtomicInteger(1); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> { Header[] fooHeader = post.getHeaders("foo"); assertEquals(1, fooHeader.length); assertEquals("foo", fooHeader[0].getName()); assertEquals("v" + counter.getAndIncrement(), fooHeader[0].getValue()); return httpResponse("clientId", "3"); }); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( Endpoint.create("hostname", 666, false), new FeedParams.Builder().build(), "", connectionParams, mockFactory, "clientId"); apacheGatewayConnection.connect(); apacheGatewayConnection.handshake(); List<Document> documents = new ArrayList<>(); documents.add(createDoc("42", "content", true)); apacheGatewayConnection.writeOperations(documents); apacheGatewayConnection.writeOperations(documents); verify(headerProvider, times(3)).getHeaderValue(); } @Test private static ApacheGatewayConnection.HttpClientFactory mockHttpClientFactory(HttpExecuteMock httpExecuteMock) throws IOException { ApacheGatewayConnection.HttpClientFactory mockFactory = mock(ApacheGatewayConnection.HttpClientFactory.class); HttpClient httpClientMock = mock(HttpClient.class); when(mockFactory.createClient()).thenReturn(httpClientMock); stub(httpClientMock.execute(any())).toAnswer((Answer) invocation -> { Object[] args = invocation.getArguments(); HttpPost post = 
(HttpPost) args[0]; return httpExecuteMock.execute(post); }); return mockFactory; } @FunctionalInterface private interface HttpExecuteMock { HttpResponse execute(HttpPost httpPost) throws IOException; } private Document createDoc(final String docId, final String content, boolean useJson) throws IOException { return new Document(docId, content.getBytes(), null /* context */); } private void addMockedHeader( final HttpResponse httpResponseMock, final String name, final String value, HeaderElement[] elements) { final Header header = new Header() { @Override public String getName() { return name; } @Override public String getValue() { return value; } @Override public HeaderElement[] getElements() throws ParseException { return elements; } }; when(httpResponseMock.getFirstHeader(name)).thenReturn(header); } private HttpResponse httpResponse(String sessionIdInResult, String version) throws IOException { final HttpResponse httpResponseMock = mock(HttpResponse.class); StatusLine statusLineMock = mock(StatusLine.class); when(httpResponseMock.getStatusLine()).thenReturn(statusLineMock); when(statusLineMock.getStatusCode()).thenReturn(200); addMockedHeader(httpResponseMock, Headers.SESSION_ID, sessionIdInResult, null); addMockedHeader(httpResponseMock, Headers.VERSION, version, null); HeaderElement[] headerElements = new HeaderElement[1]; headerElements[0] = mock(HeaderElement.class); final HttpEntity httpEntityMock = mock(HttpEntity.class); when(httpResponseMock.getEntity()).thenReturn(httpEntityMock); final InputStream inputs = new ByteArrayInputStream("fake response data".getBytes()); when(httpEntityMock.getContent()).thenReturn(inputs); return httpResponseMock; } private static HttpResponse createErrorHttpResponse(int statusCode, String reasonPhrase, String message) throws IOException { HttpResponse response = mock(HttpResponse.class); StatusLine statusLine = mock(StatusLine.class); when(statusLine.getStatusCode()).thenReturn(statusCode); 
when(statusLine.getReasonPhrase()).thenReturn(reasonPhrase); when(response.getStatusLine()).thenReturn(statusLine); HttpEntity httpEntity = mock(HttpEntity.class); when(httpEntity.getContentType()).thenReturn(new BasicHeader("Content-Type", "application/json")); String json = String.format("{\"message\": \"%s\"}", message); when(httpEntity.getContent()).thenReturn(new ByteArrayInputStream(json.getBytes())); when(response.getEntity()).thenReturn(httpEntity); return response; } }
I came to the same conclusion after looking more closely at the test methods. The latches are removed in the latest commit. PTAL
/**
 * Verifies that the detailed error message embedded in a JSON error response body
 * is extracted and included in the {@link ServerResponseException} thrown to the caller.
 *
 * <p>No latch is needed here: the mocked HTTP client executes synchronously, and any
 * assertion placed after {@code writeOperations(...)} would be dead code anyway,
 * since the expected exception is thrown by that call.
 */
public void detailed_error_message_is_extracted_from_error_responses_with_json()
        throws IOException, ServerResponseException {
    String reasonPhrase = "Unauthorized";
    String errorMessage = "Invalid credentials";
    // The surfaced message combines the HTTP reason phrase with the JSON "message" field.
    expectedException.expect(ServerResponseException.class);
    expectedException.expectMessage(reasonPhrase + " - " + errorMessage);
    ApacheGatewayConnection.HttpClientFactory mockFactory =
            mockHttpClientFactory(post -> createErrorHttpResponse(401, reasonPhrase, errorMessage));
    ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection(
            Endpoint.create("hostname", 666, false),
            new FeedParams.Builder().build(),
            "",
            new ConnectionParams.Builder().build(),
            mockFactory,
            "clientId");
    apacheGatewayConnection.connect();
    apacheGatewayConnection.handshake();
    apacheGatewayConnection.writeOperations(Collections.singletonList(createDoc("42", "content", true)));
}
// Wait (up to 10s) for the mocked HTTP execution to signal that content was sent.
// NOTE(review): in its original placement this statement was unreachable, because
// the expected exception is thrown by the preceding writeOperations(...) call.
assertTrue(verifyContentSentLatch.await(10, TimeUnit.SECONDS));
/**
 * Ensures the "message" field of a JSON error body is surfaced in the
 * ServerResponseException raised by writeOperations().
 */
public void detailed_error_message_is_extracted_from_error_responses_with_json()
        throws IOException, ServerResponseException {
    String reasonPhrase = "Unauthorized";
    String errorMessage = "Invalid credentials";

    // Declare the expected failure up front: reason phrase joined with the JSON detail.
    expectedException.expect(ServerResponseException.class);
    expectedException.expectMessage(reasonPhrase + " - " + errorMessage);

    // Every request to the mocked client yields a 401 carrying the JSON error body.
    ApacheGatewayConnection.HttpClientFactory clientFactory =
            mockHttpClientFactory(request -> createErrorHttpResponse(401, reasonPhrase, errorMessage));

    ApacheGatewayConnection connection = new ApacheGatewayConnection(
            Endpoint.create("hostname", 666, false),
            new FeedParams.Builder().build(),
            "",
            new ConnectionParams.Builder().build(),
            clientFactory,
            "clientId");
    connection.connect();
    connection.handshake();

    // Feeding a single document triggers the error response and thus the exception.
    connection.writeOperations(Collections.singletonList(createDoc("42", "content", true)));
}
class ApacheGatewayConnectionTest { @Rule public ExpectedException expectedException = ExpectedException.none(); @Test public void testProtocolV3() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().setDataFormat(FeedParams.DataFormat.JSON_UTF8).build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setEnableV3Protocol(true) .build(); final List<Document> documents = new ArrayList<>(); final CountDownLatch verifyContentSentLatch = new CountDownLatch(1); final String vespaDocContent ="Hello, I a JSON doc."; final String docId = "42"; final AtomicInteger requestsReceived = new AtomicInteger(0); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> { final Header clientIdHeader = post.getFirstHeader(Headers.CLIENT_ID); verifyContentSentLatch.countDown(); return httpResponse(clientIdHeader.getValue(), "3"); }); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, "clientId"); apacheGatewayConnection.connect(); apacheGatewayConnection.handshake(); documents.add(createDoc(docId, vespaDocContent, true)); apacheGatewayConnection.writeOperations(documents); assertTrue(verifyContentSentLatch.await(10, TimeUnit.SECONDS)); } @Test(expected=IllegalArgumentException.class) public void testServerReturnsBadSessionInV3() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().setDataFormat(FeedParams.DataFormat.JSON_UTF8).build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setEnableV3Protocol(true) .build(); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> httpResponse("Wrong Id from server", "3")); 
ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, "clientId"); apacheGatewayConnection.connect(); final List<Document> documents = new ArrayList<>(); apacheGatewayConnection.writeOperations(documents); } @Test(expected=RuntimeException.class) public void testBadConfigParameters() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().setDataFormat(FeedParams.DataFormat.JSON_UTF8).build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setEnableV3Protocol(true) .build(); final ApacheGatewayConnection.HttpClientFactory mockFactory = mock(ApacheGatewayConnection.HttpClientFactory.class); new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, null); } @Test public void testJsonDocumentHeader() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().setDataFormat(FeedParams.DataFormat.JSON_UTF8).build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setUseCompression(true) .build(); final List<Document> documents = new ArrayList<>(); final CountDownLatch verifyContentSentLatch = new CountDownLatch(1); final String vespaDocContent ="Hello, I a JSON doc."; final String docId = "42"; final AtomicInteger requestsReceived = new AtomicInteger(0); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> { final Header header = post.getFirstHeader(Headers.DATA_FORMAT); if (requestsReceived.incrementAndGet() == 1) { assert (header == null); return httpResponse("clientId", "3"); } assertNotNull(header); assertThat(header.getValue(), is(FeedParams.DataFormat.JSON_UTF8.name())); 
verifyContentSentLatch.countDown(); return httpResponse("clientId", "3"); }); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, "clientId"); apacheGatewayConnection.connect(); apacheGatewayConnection.handshake(); documents.add(createDoc(docId, vespaDocContent, true)); apacheGatewayConnection.writeOperations(documents); assertTrue(verifyContentSentLatch.await(10, TimeUnit.SECONDS)); } @Test public void testZipAndCreateEntity() throws IOException { final String testString = "Hello world"; InputStream stream = new ByteArrayInputStream(testString.getBytes(StandardCharsets.UTF_8)); InputStreamEntity inputStreamEntity = ApacheGatewayConnection.zipAndCreateEntity(stream); final String rawContent = TestUtils.zipStreamToString(inputStreamEntity.getContent()); assert(testString.equals(rawContent)); } /** * Mocks the HttpClient, and verifies that the compressed data is sent. */ @Test public void testCompressedWriteOperations() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setUseCompression(true) .build(); final List<Document> documents = new ArrayList<>(); final CountDownLatch verifyContentSentLatch = new CountDownLatch(1); final String vespaDocContent ="Hello, I am the document data."; final String docId = "42"; final Document doc = createDoc(docId, vespaDocContent, false); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> { final Header header = post.getFirstHeader("Content-Encoding"); if (header != null && header.getValue().equals("gzip")) { final String rawContent = TestUtils.zipStreamToString(post.getEntity().getContent()); final String vespaHeaderText = "<vespafeed>\n"; final String vespaFooterText = "</vespafeed>\n"; 
assertThat(rawContent, is( doc.getOperationId() + " 38\n" + vespaHeaderText + vespaDocContent + "\n" + vespaFooterText)); verifyContentSentLatch.countDown(); } return httpResponse("clientId", "3"); }); StatusLine statusLineMock = mock(StatusLine.class); when(statusLineMock.getStatusCode()).thenReturn(200); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, "clientId"); apacheGatewayConnection.connect(); apacheGatewayConnection.handshake(); documents.add(doc); apacheGatewayConnection.writeOperations(documents); assertTrue(verifyContentSentLatch.await(10, TimeUnit.SECONDS)); } @Test public void dynamic_headers_are_added_to_the_response() throws IOException, ServerResponseException, InterruptedException { ConnectionParams.HeaderProvider headerProvider = mock(ConnectionParams.HeaderProvider.class); when(headerProvider.getHeaderValue()) .thenReturn("v1") .thenReturn("v2") .thenReturn("v3"); ConnectionParams connectionParams = new ConnectionParams.Builder() .addDynamicHeader("foo", headerProvider) .build(); CountDownLatch verifyContentSentLatch = new CountDownLatch(1); AtomicInteger counter = new AtomicInteger(1); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> { Header[] fooHeader = post.getHeaders("foo"); assertEquals(1, fooHeader.length); assertEquals("foo", fooHeader[0].getName()); assertEquals("v" + counter.getAndIncrement(), fooHeader[0].getValue()); verifyContentSentLatch.countDown(); return httpResponse("clientId", "3"); }); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( Endpoint.create("hostname", 666, false), new FeedParams.Builder().build(), "", connectionParams, mockFactory, "clientId"); apacheGatewayConnection.connect(); apacheGatewayConnection.handshake(); List<Document> documents = new ArrayList<>(); documents.add(createDoc("42", "content", true)); 
apacheGatewayConnection.writeOperations(documents); apacheGatewayConnection.writeOperations(documents); assertTrue(verifyContentSentLatch.await(10, TimeUnit.SECONDS)); verify(headerProvider, times(3)).getHeaderValue(); } @Test private static ApacheGatewayConnection.HttpClientFactory mockHttpClientFactory(HttpExecuteMock httpExecuteMock) throws IOException { ApacheGatewayConnection.HttpClientFactory mockFactory = mock(ApacheGatewayConnection.HttpClientFactory.class); HttpClient httpClientMock = mock(HttpClient.class); when(mockFactory.createClient()).thenReturn(httpClientMock); stub(httpClientMock.execute(any())).toAnswer((Answer) invocation -> { Object[] args = invocation.getArguments(); HttpPost post = (HttpPost) args[0]; return httpExecuteMock.execute(post); }); return mockFactory; } @FunctionalInterface private interface HttpExecuteMock { HttpResponse execute(HttpPost httpPost) throws IOException; } private Document createDoc(final String docId, final String content, boolean useJson) throws IOException { return new Document(docId, content.getBytes(), null /* context */); } private void addMockedHeader( final HttpResponse httpResponseMock, final String name, final String value, HeaderElement[] elements) { final Header header = new Header() { @Override public String getName() { return name; } @Override public String getValue() { return value; } @Override public HeaderElement[] getElements() throws ParseException { return elements; } }; when(httpResponseMock.getFirstHeader(name)).thenReturn(header); } private HttpResponse httpResponse(String sessionIdInResult, String version) throws IOException { final HttpResponse httpResponseMock = mock(HttpResponse.class); StatusLine statusLineMock = mock(StatusLine.class); when(httpResponseMock.getStatusLine()).thenReturn(statusLineMock); when(statusLineMock.getStatusCode()).thenReturn(200); addMockedHeader(httpResponseMock, Headers.SESSION_ID, sessionIdInResult, null); addMockedHeader(httpResponseMock, Headers.VERSION, 
version, null); HeaderElement[] headerElements = new HeaderElement[1]; headerElements[0] = mock(HeaderElement.class); final HttpEntity httpEntityMock = mock(HttpEntity.class); when(httpResponseMock.getEntity()).thenReturn(httpEntityMock); final InputStream inputs = new ByteArrayInputStream("fake response data".getBytes()); when(httpEntityMock.getContent()).thenReturn(inputs); return httpResponseMock; } private static HttpResponse createErrorHttpResponse(int statusCode, String reasonPhrase, String message) throws IOException { HttpResponse response = mock(HttpResponse.class); StatusLine statusLine = mock(StatusLine.class); when(statusLine.getStatusCode()).thenReturn(statusCode); when(statusLine.getReasonPhrase()).thenReturn(reasonPhrase); when(response.getStatusLine()).thenReturn(statusLine); HttpEntity httpEntity = mock(HttpEntity.class); when(httpEntity.getContentType()).thenReturn(new BasicHeader("Content-Type", "application/json")); String json = String.format("{\"message\": \"%s\"}", message); when(httpEntity.getContent()).thenReturn(new ByteArrayInputStream(json.getBytes())); when(response.getEntity()).thenReturn(httpEntity); return response; } }
class ApacheGatewayConnectionTest { @Rule public ExpectedException expectedException = ExpectedException.none(); @Test public void testProtocolV3() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().setDataFormat(FeedParams.DataFormat.JSON_UTF8).build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setEnableV3Protocol(true) .build(); final List<Document> documents = new ArrayList<>(); final String vespaDocContent ="Hello, I a JSON doc."; final String docId = "42"; final AtomicInteger requestsReceived = new AtomicInteger(0); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> { final Header clientIdHeader = post.getFirstHeader(Headers.CLIENT_ID); return httpResponse(clientIdHeader.getValue(), "3"); }); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, "clientId"); apacheGatewayConnection.connect(); apacheGatewayConnection.handshake(); documents.add(createDoc(docId, vespaDocContent, true)); apacheGatewayConnection.writeOperations(documents); } @Test(expected=IllegalArgumentException.class) public void testServerReturnsBadSessionInV3() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().setDataFormat(FeedParams.DataFormat.JSON_UTF8).build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setEnableV3Protocol(true) .build(); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> httpResponse("Wrong Id from server", "3")); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, "clientId"); 
apacheGatewayConnection.connect(); final List<Document> documents = new ArrayList<>(); apacheGatewayConnection.writeOperations(documents); } @Test(expected=RuntimeException.class) public void testBadConfigParameters() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().setDataFormat(FeedParams.DataFormat.JSON_UTF8).build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setEnableV3Protocol(true) .build(); final ApacheGatewayConnection.HttpClientFactory mockFactory = mock(ApacheGatewayConnection.HttpClientFactory.class); new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, null); } @Test public void testJsonDocumentHeader() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().setDataFormat(FeedParams.DataFormat.JSON_UTF8).build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setUseCompression(true) .build(); final List<Document> documents = new ArrayList<>(); final String vespaDocContent ="Hello, I a JSON doc."; final String docId = "42"; final AtomicInteger requestsReceived = new AtomicInteger(0); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> { final Header header = post.getFirstHeader(Headers.DATA_FORMAT); if (requestsReceived.incrementAndGet() == 1) { assert (header == null); return httpResponse("clientId", "3"); } assertNotNull(header); assertThat(header.getValue(), is(FeedParams.DataFormat.JSON_UTF8.name())); return httpResponse("clientId", "3"); }); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, mockFactory, "clientId"); apacheGatewayConnection.connect(); 
apacheGatewayConnection.handshake(); documents.add(createDoc(docId, vespaDocContent, true)); apacheGatewayConnection.writeOperations(documents); } @Test public void testZipAndCreateEntity() throws IOException { final String testString = "Hello world"; InputStream stream = new ByteArrayInputStream(testString.getBytes(StandardCharsets.UTF_8)); InputStreamEntity inputStreamEntity = ApacheGatewayConnection.zipAndCreateEntity(stream); final String rawContent = TestUtils.zipStreamToString(inputStreamEntity.getContent()); assert(testString.equals(rawContent)); } /** * Mocks the HttpClient, and verifies that the compressed data is sent. */ @Test public void testCompressedWriteOperations() throws Exception { final Endpoint endpoint = Endpoint.create("hostname", 666, false); final FeedParams feedParams = new FeedParams.Builder().build(); final String clusterSpecificRoute = ""; final ConnectionParams connectionParams = new ConnectionParams.Builder() .setUseCompression(true) .build(); final List<Document> documents = new ArrayList<>(); final String vespaDocContent ="Hello, I am the document data."; final String docId = "42"; final Document doc = createDoc(docId, vespaDocContent, false); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> { final Header header = post.getFirstHeader("Content-Encoding"); if (header != null && header.getValue().equals("gzip")) { final String rawContent = TestUtils.zipStreamToString(post.getEntity().getContent()); final String vespaHeaderText = "<vespafeed>\n"; final String vespaFooterText = "</vespafeed>\n"; assertThat(rawContent, is( doc.getOperationId() + " 38\n" + vespaHeaderText + vespaDocContent + "\n" + vespaFooterText)); } return httpResponse("clientId", "3"); }); StatusLine statusLineMock = mock(StatusLine.class); when(statusLineMock.getStatusCode()).thenReturn(200); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( endpoint, feedParams, clusterSpecificRoute, connectionParams, 
mockFactory, "clientId"); apacheGatewayConnection.connect(); apacheGatewayConnection.handshake(); documents.add(doc); apacheGatewayConnection.writeOperations(documents); } @Test public void dynamic_headers_are_added_to_the_response() throws IOException, ServerResponseException, InterruptedException { ConnectionParams.HeaderProvider headerProvider = mock(ConnectionParams.HeaderProvider.class); when(headerProvider.getHeaderValue()) .thenReturn("v1") .thenReturn("v2") .thenReturn("v3"); ConnectionParams connectionParams = new ConnectionParams.Builder() .addDynamicHeader("foo", headerProvider) .build(); AtomicInteger counter = new AtomicInteger(1); ApacheGatewayConnection.HttpClientFactory mockFactory = mockHttpClientFactory(post -> { Header[] fooHeader = post.getHeaders("foo"); assertEquals(1, fooHeader.length); assertEquals("foo", fooHeader[0].getName()); assertEquals("v" + counter.getAndIncrement(), fooHeader[0].getValue()); return httpResponse("clientId", "3"); }); ApacheGatewayConnection apacheGatewayConnection = new ApacheGatewayConnection( Endpoint.create("hostname", 666, false), new FeedParams.Builder().build(), "", connectionParams, mockFactory, "clientId"); apacheGatewayConnection.connect(); apacheGatewayConnection.handshake(); List<Document> documents = new ArrayList<>(); documents.add(createDoc("42", "content", true)); apacheGatewayConnection.writeOperations(documents); apacheGatewayConnection.writeOperations(documents); verify(headerProvider, times(3)).getHeaderValue(); } @Test private static ApacheGatewayConnection.HttpClientFactory mockHttpClientFactory(HttpExecuteMock httpExecuteMock) throws IOException { ApacheGatewayConnection.HttpClientFactory mockFactory = mock(ApacheGatewayConnection.HttpClientFactory.class); HttpClient httpClientMock = mock(HttpClient.class); when(mockFactory.createClient()).thenReturn(httpClientMock); stub(httpClientMock.execute(any())).toAnswer((Answer) invocation -> { Object[] args = invocation.getArguments(); HttpPost post = 
(HttpPost) args[0]; return httpExecuteMock.execute(post); }); return mockFactory; } @FunctionalInterface private interface HttpExecuteMock { HttpResponse execute(HttpPost httpPost) throws IOException; } private Document createDoc(final String docId, final String content, boolean useJson) throws IOException { return new Document(docId, content.getBytes(), null /* context */); } private void addMockedHeader( final HttpResponse httpResponseMock, final String name, final String value, HeaderElement[] elements) { final Header header = new Header() { @Override public String getName() { return name; } @Override public String getValue() { return value; } @Override public HeaderElement[] getElements() throws ParseException { return elements; } }; when(httpResponseMock.getFirstHeader(name)).thenReturn(header); } private HttpResponse httpResponse(String sessionIdInResult, String version) throws IOException { final HttpResponse httpResponseMock = mock(HttpResponse.class); StatusLine statusLineMock = mock(StatusLine.class); when(httpResponseMock.getStatusLine()).thenReturn(statusLineMock); when(statusLineMock.getStatusCode()).thenReturn(200); addMockedHeader(httpResponseMock, Headers.SESSION_ID, sessionIdInResult, null); addMockedHeader(httpResponseMock, Headers.VERSION, version, null); HeaderElement[] headerElements = new HeaderElement[1]; headerElements[0] = mock(HeaderElement.class); final HttpEntity httpEntityMock = mock(HttpEntity.class); when(httpResponseMock.getEntity()).thenReturn(httpEntityMock); final InputStream inputs = new ByteArrayInputStream("fake response data".getBytes()); when(httpEntityMock.getContent()).thenReturn(inputs); return httpResponseMock; } private static HttpResponse createErrorHttpResponse(int statusCode, String reasonPhrase, String message) throws IOException { HttpResponse response = mock(HttpResponse.class); StatusLine statusLine = mock(StatusLine.class); when(statusLine.getStatusCode()).thenReturn(statusCode); 
when(statusLine.getReasonPhrase()).thenReturn(reasonPhrase); when(response.getStatusLine()).thenReturn(statusLine); HttpEntity httpEntity = mock(HttpEntity.class); when(httpEntity.getContentType()).thenReturn(new BasicHeader("Content-Type", "application/json")); String json = String.format("{\"message\": \"%s\"}", message); when(httpEntity.getContent()).thenReturn(new ByteArrayInputStream(json.getBytes())); when(response.getEntity()).thenReturn(httpEntity); return response; } }
Should pass a value larger than 0 as the parameter, to ensure we try to read a reasonably sized chunk of data.
/**
 * Reads encrypted data from the underlying channel into the unwrap buffer.
 *
 * @return number of bytes read (possibly 0 on a non-blocking channel)
 * @throws ClosedChannelException if the peer has closed the connection
 * @throws IOException on other channel errors
 */
private int channelRead() throws IOException {
    // Request room for a full TLS packet rather than 0 bytes, so a single read
    // can pull in a reasonable chunk of data from the channel.
    int read = channel.read(unwrapBuffer.getWritable(sslEngine.getSession().getPacketBufferSize()));
    if (read == -1) throw new ClosedChannelException();
    return read;
}
// NOTE(review): getWritable(0) requests no guaranteed capacity; should pass a value
// larger than 0 (e.g. the session's packet buffer size) so a single read can pull in
// a reasonable chunk of data — TODO confirm Buffer.getWritable semantics.
int read = channel.read(unwrapBuffer.getWritable(0));
/**
 * Pulls encrypted bytes from the socket channel into the unwrap buffer,
 * reserving room for a full TLS packet up front.
 *
 * @return the number of bytes read
 * @throws ClosedChannelException when the channel reports end-of-stream
 */
private int channelRead() throws IOException {
    ByteBuffer writable = unwrapBuffer.getWritable(sessionPacketBufferSize);
    int bytesRead = channel.read(writable);
    if (bytesRead == -1) {
        throw new ClosedChannelException();
    }
    return bytesRead;
}
class TlsCryptoSocket implements CryptoSocket { private static final ByteBuffer NULL_BUFFER = ByteBuffer.allocate(0); private static final Logger log = Logger.getLogger(TlsCryptoSocket.class.getName()); private enum HandshakeState { NOT_STARTED, NEED_READ, NEED_WRITE, COMPLETED } private final SocketChannel channel; private final SSLEngine sslEngine; private final Buffer wrapBuffer; private final Buffer unwrapBuffer; private ByteBuffer handshakeDummyBuffer; private HandshakeState handshakeState; public TlsCryptoSocket(SocketChannel channel, SSLEngine sslEngine) { this.channel = channel; this.sslEngine = sslEngine; SSLSession nullSession = sslEngine.getSession(); this.wrapBuffer = new Buffer(nullSession.getPacketBufferSize() * 2); this.unwrapBuffer = new Buffer(nullSession.getPacketBufferSize() * 2); this.handshakeDummyBuffer = ByteBuffer.allocate(nullSession.getApplicationBufferSize()); this.handshakeState = HandshakeState.NOT_STARTED; } @Override public SocketChannel channel() { return channel; } @Override public HandshakeResult handshake() throws IOException { HandshakeState newHandshakeState = processHandshakeState(this.handshakeState); log.fine(() -> String.format("Handshake state '%s -> %s'", this.handshakeState, newHandshakeState)); this.handshakeState = newHandshakeState; return toHandshakeResult(newHandshakeState); } private HandshakeState processHandshakeState(HandshakeState state) throws IOException { switch (state) { case NOT_STARTED: sslEngine.beginHandshake(); break; case NEED_WRITE: channelWrite(); break; case NEED_READ: channelRead(); break; case COMPLETED: return HandshakeState.COMPLETED; default: throw unhandledStateException(state); } while (true) { switch (sslEngine.getHandshakeStatus()) { case NOT_HANDSHAKING: if (hasWrapBufferMoreData()) return HandshakeState.NEED_WRITE; sslEngine.setEnableSessionCreation(false); handshakeDummyBuffer = null; return HandshakeState.COMPLETED; case NEED_TASK: sslEngine.getDelegatedTask().run(); break; case 
NEED_UNWRAP: if (hasWrapBufferMoreData()) return HandshakeState.NEED_WRITE; if (!handshakeUnwrap()) return HandshakeState.NEED_READ; break; case NEED_WRAP: if (!handshakeWrap()) return HandshakeState.NEED_WRITE; break; default: throw new IllegalStateException("Unexpected handshake status: " + sslEngine.getHandshakeStatus()); } } } private static HandshakeResult toHandshakeResult(HandshakeState state) { switch (state) { case NEED_READ: return HandshakeResult.NEED_READ; case NEED_WRITE: return HandshakeResult.NEED_WRITE; case COMPLETED: return HandshakeResult.DONE; default: throw unhandledStateException(state); } } @Override public int getMinimumReadBufferSize() { return sslEngine.getSession().getApplicationBufferSize(); } @Override public int read(ByteBuffer dst) throws IOException { verifyHandshakeCompleted(); int bytesUnwrapped = applicationDataUnwrap(dst); if (bytesUnwrapped > 0) return bytesUnwrapped; int bytesRead = channelRead(); if (bytesRead == 0) return 0; return drain(dst); } @Override public int drain(ByteBuffer dst) throws IOException { verifyHandshakeCompleted(); int totalBytesUnwrapped = 0; int bytesUnwrapped; do { bytesUnwrapped = applicationDataUnwrap(dst); totalBytesUnwrapped += bytesUnwrapped; } while (bytesUnwrapped > 0); return totalBytesUnwrapped; } @Override public int write(ByteBuffer src) throws IOException { if (flush() == FlushResult.NEED_WRITE) return 0; int totalBytesWrapped = 0; while (src.hasRemaining()) { int bytesWrapped = applicationDataWrap(src); if (bytesWrapped == 0) break; totalBytesWrapped += bytesWrapped; } return totalBytesWrapped; } @Override public FlushResult flush() throws IOException { channelWrite(); return hasWrapBufferMoreData() ? 
FlushResult.NEED_WRITE : FlushResult.DONE; } private boolean handshakeWrap() throws IOException { SSLEngineResult result = sslEngineWrap(NULL_BUFFER); switch (result.getStatus()) { case OK: return true; case BUFFER_OVERFLOW: return false; default: throw unexpectedStatusException(result.getStatus()); } } private int applicationDataWrap(ByteBuffer src) throws IOException { SSLEngineResult result = sslEngineWrap(src); switch (result.getStatus()) { case OK: int bytesConsumed = result.bytesConsumed(); if (bytesConsumed == 0) throw new SSLException("Got handshake data in application data wrap"); return bytesConsumed; case BUFFER_OVERFLOW: return 0; default: throw unexpectedStatusException(result.getStatus()); } } private SSLEngineResult sslEngineWrap(ByteBuffer src) throws IOException { SSLEngineResult result = sslEngine.wrap(src, wrapBuffer.getWritable(0)); if (result.getStatus() == Status.CLOSED) throw new ClosedChannelException(); return result; } private boolean handshakeUnwrap() throws IOException { SSLEngineResult result = sslEngineUnwrap(handshakeDummyBuffer); switch (result.getStatus()) { case OK: if (result.bytesProduced() > 0) throw new SSLException("Got application data in handshake unwrap"); return true; case BUFFER_UNDERFLOW: return false; default: throw unexpectedStatusException(result.getStatus()); } } private int applicationDataUnwrap(ByteBuffer dst) throws IOException { SSLEngineResult result = sslEngineUnwrap(dst); switch (result.getStatus()) { case OK: int bytesProduced = result.bytesProduced(); if (bytesProduced == 0) throw new SSLException("Got handshake data in application data unwrap"); return bytesProduced; case BUFFER_OVERFLOW: case BUFFER_UNDERFLOW: return 0; default: throw unexpectedStatusException(result.getStatus()); } } private SSLEngineResult sslEngineUnwrap(ByteBuffer dst) throws IOException { SSLEngineResult result = sslEngine.unwrap(unwrapBuffer.getReadable(), dst); if (result.getStatus() == Status.CLOSED) throw new 
ClosedChannelException(); return result; } private int channelWrite() throws IOException { return channel.write(wrapBuffer.getReadable()); } private static IllegalStateException unhandledStateException(HandshakeState state) { return new IllegalStateException("Unhandled state: " + state); } private static IllegalStateException unexpectedStatusException(Status status) { return new IllegalStateException("Unexpected status: " + status); } private void verifyHandshakeCompleted() throws SSLException { if (handshakeState != HandshakeState.COMPLETED) throw new SSLException("Handshake not completed: handshakeState=" + handshakeState); } private boolean hasWrapBufferMoreData() { return wrapBuffer.bytes() > 0; } }
/**
 * A CryptoSocket providing TLS on top of a SocketChannel, driven by an SSLEngine.
 * handshake() advances the TLS handshake incrementally and reports whether the
 * caller must wait for channel read or write readiness; read()/drain()/write()/flush()
 * move application data once the handshake has completed. Renegotiation is rejected.
 *
 * NOTE(review): channelRead() is called below but its definition is not visible in
 * this block — confirm it exists (filling unwrapBuffer from the channel) alongside
 * channelWrite().
 */
class TlsCryptoSocket implements CryptoSocket {

    // Empty source buffer used when wrapping pure handshake output.
    private static final ByteBuffer NULL_BUFFER = ByteBuffer.allocate(0);

    private static final Logger log = Logger.getLogger(TlsCryptoSocket.class.getName());

    // NEED_READ/NEED_WRITE tell the caller which channel readiness to await
    // before calling handshake() again.
    private enum HandshakeState { NOT_STARTED, NEED_READ, NEED_WRITE, COMPLETED }

    private final SocketChannel channel;
    private final SSLEngine sslEngine;
    private final Buffer wrapBuffer;   // outgoing ciphertext pending channel write
    private final Buffer unwrapBuffer; // incoming ciphertext pending unwrap
    private int sessionPacketBufferSize;      // captured when the handshake completes
    private int sessionApplicationBufferSize; // captured when the handshake completes
    private ByteBuffer handshakeDummyBuffer;  // unwrap sink during handshake; released on completion
    private HandshakeState handshakeState;

    public TlsCryptoSocket(SocketChannel channel, SSLEngine sslEngine) {
        this.channel = channel;
        this.sslEngine = sslEngine;
        // The pre-handshake ("null") session only provides size estimates; the
        // real session sizes are recorded once the handshake completes.
        SSLSession nullSession = sslEngine.getSession();
        this.wrapBuffer = new Buffer(nullSession.getPacketBufferSize() * 2);
        this.unwrapBuffer = new Buffer(nullSession.getPacketBufferSize() * 2);
        this.handshakeDummyBuffer = ByteBuffer.allocate(nullSession.getApplicationBufferSize());
        this.handshakeState = HandshakeState.NOT_STARTED;
    }

    @Override
    public SocketChannel channel() {
        return channel;
    }

    /** Advances the handshake one step and maps the new state to a HandshakeResult. */
    @Override
    public HandshakeResult handshake() throws IOException {
        HandshakeState newHandshakeState = processHandshakeState(this.handshakeState);
        log.fine(() -> String.format("Handshake state '%s -> %s'", this.handshakeState, newHandshakeState));
        this.handshakeState = newHandshakeState;
        return toHandshakeResult(newHandshakeState);
    }

    // Performs the I/O the previous state asked for, then runs the SSLEngine
    // state machine until it blocks on channel I/O or the handshake completes.
    private HandshakeState processHandshakeState(HandshakeState state) throws IOException {
        switch (state) {
            case NOT_STARTED:
                sslEngine.beginHandshake();
                break;
            case NEED_WRITE:
                channelWrite();
                break;
            case NEED_READ:
                channelRead();
                break;
            case COMPLETED:
                return HandshakeState.COMPLETED;
            default:
                throw unhandledStateException(state);
        }
        while (true) {
            switch (sslEngine.getHandshakeStatus()) {
                case NOT_HANDSHAKING:
                    // Flush any remaining handshake bytes before declaring completion.
                    if (wrapBuffer.bytes() > 0) return HandshakeState.NEED_WRITE;
                    // Disallow renegotiation and drop the handshake-only buffer.
                    sslEngine.setEnableSessionCreation(false);
                    handshakeDummyBuffer = null;
                    SSLSession session = sslEngine.getSession();
                    sessionApplicationBufferSize = session.getApplicationBufferSize();
                    sessionPacketBufferSize = session.getPacketBufferSize();
                    return HandshakeState.COMPLETED;
                case NEED_TASK:
                    // Runs the delegated task synchronously on this thread.
                    sslEngine.getDelegatedTask().run();
                    break;
                case NEED_UNWRAP:
                    // Pending output must be flushed before more input is processed.
                    if (wrapBuffer.bytes() > 0) return HandshakeState.NEED_WRITE;
                    if (!handshakeUnwrap()) return HandshakeState.NEED_READ;
                    break;
                case NEED_WRAP:
                    if (!handshakeWrap()) return HandshakeState.NEED_WRITE;
                    break;
                default:
                    throw new IllegalStateException("Unexpected handshake status: " + sslEngine.getHandshakeStatus());
            }
        }
    }

    private static HandshakeResult toHandshakeResult(HandshakeState state) {
        switch (state) {
            case NEED_READ:
                return HandshakeResult.NEED_READ;
            case NEED_WRITE:
                return HandshakeResult.NEED_WRITE;
            case COMPLETED:
                return HandshakeResult.DONE;
            default:
                throw unhandledStateException(state);
        }
    }

    @Override
    public int getMinimumReadBufferSize() {
        return sessionApplicationBufferSize;
    }

    /** Reads application data: drains buffered plaintext first, reading from the channel only when empty. */
    @Override
    public int read(ByteBuffer dst) throws IOException {
        verifyHandshakeCompleted();
        int bytesUnwrapped = drain(dst);
        if (bytesUnwrapped > 0) return bytesUnwrapped;
        int bytesRead = channelRead();
        if (bytesRead == 0) return 0;
        return drain(dst);
    }

    /** Unwraps as much already-buffered ciphertext into dst as possible, without touching the channel. */
    @Override
    public int drain(ByteBuffer dst) throws IOException {
        verifyHandshakeCompleted();
        int totalBytesUnwrapped = 0;
        int bytesUnwrapped;
        do {
            bytesUnwrapped = applicationDataUnwrap(dst);
            totalBytesUnwrapped += bytesUnwrapped;
        } while (bytesUnwrapped > 0);
        return totalBytesUnwrapped;
    }

    /** Wraps application data from src; returns 0 if pending ciphertext must be flushed first. */
    @Override
    public int write(ByteBuffer src) throws IOException {
        if (flush() == FlushResult.NEED_WRITE) return 0;
        int totalBytesWrapped = 0;
        int bytesWrapped;
        do {
            bytesWrapped = applicationDataWrap(src);
            totalBytesWrapped += bytesWrapped;
            // Stop wrapping once a full packet's worth of ciphertext is buffered.
        } while (bytesWrapped > 0 && wrapBuffer.bytes() < sessionPacketBufferSize);
        return totalBytesWrapped;
    }

    /** Writes buffered ciphertext to the channel; NEED_WRITE if some remains. */
    @Override
    public FlushResult flush() throws IOException {
        channelWrite();
        return wrapBuffer.bytes() > 0 ? FlushResult.NEED_WRITE : FlushResult.DONE;
    }

    // Wraps handshake data; false means the wrap buffer must be flushed first.
    private boolean handshakeWrap() throws IOException {
        SSLEngineResult result = sslEngineWrap(NULL_BUFFER);
        switch (result.getStatus()) {
            case OK:
                return true;
            case BUFFER_OVERFLOW:
                return false;
            default:
                throw unexpectedStatusException(result.getStatus());
        }
    }

    // Wraps application data; returns bytes consumed from src (0 on overflow).
    private int applicationDataWrap(ByteBuffer src) throws IOException {
        SSLEngineResult result = sslEngineWrap(src);
        if (result.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) throw new SSLException("Renegotiation detected");
        switch (result.getStatus()) {
            case OK:
                return result.bytesConsumed();
            case BUFFER_OVERFLOW:
                return 0;
            default:
                throw unexpectedStatusException(result.getStatus());
        }
    }

    private SSLEngineResult sslEngineWrap(ByteBuffer src) throws IOException {
        SSLEngineResult result = sslEngine.wrap(src, wrapBuffer.getWritable(sessionPacketBufferSize));
        if (result.getStatus() == Status.CLOSED) throw new ClosedChannelException();
        return result;
    }

    // Unwraps handshake data; false means more channel input is needed.
    private boolean handshakeUnwrap() throws IOException {
        SSLEngineResult result = sslEngineUnwrap(handshakeDummyBuffer);
        switch (result.getStatus()) {
            case OK:
                if (result.bytesProduced() > 0) throw new SSLException("Got application data in handshake unwrap");
                return true;
            case BUFFER_UNDERFLOW:
                return false;
            default:
                throw unexpectedStatusException(result.getStatus());
        }
    }

    // Unwraps application data into dst; returns bytes produced (0 on under/overflow).
    private int applicationDataUnwrap(ByteBuffer dst) throws IOException {
        SSLEngineResult result = sslEngineUnwrap(dst);
        if (result.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) throw new SSLException("Renegotiation detected");
        switch (result.getStatus()) {
            case OK:
                return result.bytesProduced();
            case BUFFER_OVERFLOW:
            case BUFFER_UNDERFLOW:
                return 0;
            default:
                throw unexpectedStatusException(result.getStatus());
        }
    }

    private SSLEngineResult sslEngineUnwrap(ByteBuffer dst) throws IOException {
        SSLEngineResult result = sslEngine.unwrap(unwrapBuffer.getReadable(), dst);
        if (result.getStatus() == Status.CLOSED) throw new ClosedChannelException();
        return result;
    }

    private int channelWrite() throws IOException {
        return channel.write(wrapBuffer.getReadable());
    }

    private static IllegalStateException unhandledStateException(HandshakeState state) {
        return new IllegalStateException("Unhandled state: " + state);
    }

    private static IllegalStateException unexpectedStatusException(Status status) {
        return new IllegalStateException("Unexpected status: " + status);
    }

    private void verifyHandshakeCompleted() throws SSLException {
        if (handshakeState != HandshakeState.COMPLETED) throw new SSLException("Handshake not completed: handshakeState=" + handshakeState);
    }
}
Even though the JIT will probably optimize this one for you, I consider it a good habit to do `String structPrefix = baseFieldName + ".";` and then use that in the filter.
public Collection<Attribute> structFieldAttributes(String baseFieldName) { return attributes().stream() .filter(attribute -> attribute.getName().startsWith(baseFieldName + ".")) .collect(Collectors.toList()); }
.filter(attribute -> attribute.getName().startsWith(baseFieldName + "."))
public Collection<Attribute> structFieldAttributes(String baseFieldName) { String structPrefix = baseFieldName + "."; return attributes().stream() .filter(attribute -> attribute.getName().startsWith(structPrefix)) .collect(Collectors.toList()); }
/**
 * Derived configuration of all attribute fields of a search definition:
 * collects the Attribute instances of each field (directly, or derived from
 * imported fields, arrays/maps of simple structs, or maps of primitives) and
 * exports them as AttributesConfig.
 */
class AttributeFields extends Derived implements AttributesConfig.Producer {

    /** Which subset of attributes to export to config. */
    public enum FieldSet {ALL, FAST_ACCESS}

    // Attribute name -> attribute, in derivation (insertion) order.
    private Map<String, Attribute> attributes = new java.util.LinkedHashMap<>();

    // Imported field name -> its attribute; only the first attribute per field is kept.
    private Map<String, Attribute> importedAttributes = new java.util.LinkedHashMap<>();

    /** Whether this has any position attribute */
    private boolean hasPosition = false;

    public AttributeFields(Search search) {
        derive(search);
    }

    /** Derives everything from a field */
    @Override
    protected void derive(ImmutableSDField field, Search search) {
        if (unsupportedFieldType(field)) {
            return; // silently skip unsupported struct/map shapes
        }
        if (field.isImportedField()) {
            deriveImportedAttributes(field);
        } else if (isArrayOfSimpleStruct(field)) {
            deriveArrayOfSimpleStruct(field);
        } else if (isMapOfSimpleStruct(field)) {
            deriveMapOfSimpleStruct(field);
        } else if (isMapOfPrimitiveType(field)) {
            deriveMapOfPrimitiveType(field);
        } else {
            deriveAttributes(field);
        }
    }

    // True for struct/map fields that are not one of the supported shapes
    // (array of simple struct, map of simple struct/primitive, position types).
    private static boolean unsupportedFieldType(ImmutableSDField field) {
        return (field.usesStructOrMap() &&
                !isArrayOfSimpleStruct(field) &&
                !isMapOfSimpleStruct(field) &&
                !isMapOfPrimitiveType(field) &&
                !field.getDataType().equals(PositionDataType.INSTANCE) &&
                !field.getDataType().equals(DataType.getArray(PositionDataType.INSTANCE)));
    }

    /** Returns an attribute by name, or null if it doesn't exist */
    public Attribute getAttribute(String attributeName) {
        return attributes.get(attributeName);
    }

    public boolean containsAttribute(String attributeName) {
        return getAttribute(attributeName) != null;
    }

    /** Derives one attribute. TODO: Support non-default named attributes */
    private void deriveAttributes(ImmutableSDField field) {
        for (Attribute fieldAttribute : field.getAttributes().values()) {
            deriveAttribute(field, fieldAttribute);
        }
        if (field.containsExpression(ToPositionExpression.class)) {
            // Only one set of position attributes is supported
            if (hasPosition) {
                throw new IllegalArgumentException("Can not specify more than one set of position attributes per field: " + field.getName());
            }
            hasPosition = true;
        }
    }

    private void deriveAttribute(ImmutableSDField field, Attribute fieldAttribute) {
        Attribute attribute = getAttribute(fieldAttribute.getName());
        if (attribute == null) {
            // First time this attribute name is seen: register it
            attributes.put(fieldAttribute.getName(), fieldAttribute);
            attribute = getAttribute(fieldAttribute.getName());
        }
        Ranking ranking = field.getRanking();
        if (ranking != null && ranking.isFilter()) {
            // Filter fields are switched to bit-vector-only form
            attribute.setEnableBitVectors(true);
            attribute.setEnableOnlyBitVector(true);
        }
    }

    // NOTE(review): keyed by the field name but guarded inside the loop, so only
    // the first attribute of the field is ever stored — confirm this is intentional.
    private void deriveImportedAttributes(ImmutableSDField field) {
        for (Attribute attribute : field.getAttributes().values()) {
            if (!importedAttributes.containsKey(field.getName())) {
                importedAttributes.put(field.getName(), attribute);
            }
        }
    }

    private void deriveArrayOfSimpleStruct(ImmutableSDField field) {
        for (ImmutableSDField structField : field.getStructFields()) {
            deriveAttributeAsArrayType(structField);
        }
    }

    // Registers the field's attribute (if any) converted to an array attribute.
    private void deriveAttributeAsArrayType(ImmutableSDField field) {
        Attribute attribute = field.getAttributes().get(field.getName());
        if (attribute != null) {
            attributes.put(attribute.getName(), attribute.convertToArray());
        }
    }

    private void deriveMapOfSimpleStruct(ImmutableSDField field) {
        deriveAttributeAsArrayType(field.getStructField("key"));
        deriveMapValueField(field.getStructField("value"));
    }

    private void deriveMapValueField(ImmutableSDField valueField) {
        for (ImmutableSDField structField : valueField.getStructFields()) {
            deriveAttributeAsArrayType(structField);
        }
    }

    private void deriveMapOfPrimitiveType(ImmutableSDField field) {
        deriveAttributeAsArrayType(field.getStructField("key"));
        deriveAttributeAsArrayType(field.getStructField("value"));
    }

    /** Returns a read only attribute iterator */
    public Iterator attributeIterator() {
        return attributes().iterator();
    }

    /** Returns an unmodifiable view of all derived attributes. */
    public Collection<Attribute> attributes() {
        return Collections.unmodifiableCollection(attributes.values());
    }

    public String toString() {
        return "attributes " + getName();
    }

    @Override
    protected String getDerivedName() {
        return "attributes";
    }

    // NOTE(review): appears unused within this class — candidate for removal.
    private Map<String, AttributesConfig.Attribute.Builder> toMap(List<AttributesConfig.Attribute.Builder> ls) {
        Map<String, AttributesConfig.Attribute.Builder> ret = new LinkedHashMap<>();
        for (AttributesConfig.Attribute.Builder builder : ls) {
            ret.put((String) ConfigInstanceUtil.getField(builder, "name"), builder);
        }
        return ret;
    }

    @Override
    public void getConfig(AttributesConfig.Builder builder) {
        getConfig(builder, FieldSet.ALL);
    }

    private boolean isAttributeInFieldSet(Attribute attribute, FieldSet fs) {
        return (fs == FieldSet.ALL) || ((fs == FieldSet.FAST_ACCESS) && attribute.isFastAccess());
    }

    // Builds the config entry for a single attribute; boolean flags are only set when non-default.
    private AttributesConfig.Attribute.Builder getConfig(String attrName, Attribute attribute, boolean imported) {
        AttributesConfig.Attribute.Builder aaB = new AttributesConfig.Attribute.Builder()
                .name(attrName)
                .datatype(AttributesConfig.Attribute.Datatype.Enum.valueOf(attribute.getType().getExportAttributeTypeName()))
                .collectiontype(AttributesConfig.Attribute.Collectiontype.Enum.valueOf(attribute.getCollectionType().getName()));
        if (attribute.isRemoveIfZero()) {
            aaB.removeifzero(true);
        }
        if (attribute.isCreateIfNonExistent()) {
            aaB.createifnonexistent(true);
        }
        aaB.enablebitvectors(attribute.isEnabledBitVectors());
        aaB.enableonlybitvector(attribute.isEnabledOnlyBitVector());
        if (attribute.isFastSearch()) {
            aaB.fastsearch(true);
        }
        if (attribute.isFastAccess()) {
            aaB.fastaccess(true);
        }
        if (attribute.isHuge()) {
            aaB.huge(true);
        }
        if (attribute.getSorting().isDescending()) {
            aaB.sortascending(false);
        }
        if (attribute.getSorting().getFunction() != Sorting.Function.UCA) {
            aaB.sortfunction(AttributesConfig.Attribute.Sortfunction.Enum.valueOf(attribute.getSorting().getFunction().toString()));
        }
        if (attribute.getSorting().getStrength() != Sorting.Strength.PRIMARY) {
            aaB.sortstrength(AttributesConfig.Attribute.Sortstrength.Enum.valueOf(attribute.getSorting().getStrength().toString()));
        }
        if (!attribute.getSorting().getLocale().isEmpty()) {
            aaB.sortlocale(attribute.getSorting().getLocale());
        }
        aaB.arity(attribute.arity());
        aaB.lowerbound(attribute.lowerBound());
        aaB.upperbound(attribute.upperBound());
        aaB.densepostinglistthreshold(attribute.densePostingListThreshold());
        if (attribute.tensorType().isPresent()) {
            aaB.tensortype(attribute.tensorType().get().toString());
        }
        aaB.imported(imported);
        return aaB;
    }

    /** Writes config for all attributes in the given field set; imported attributes are included only for ALL. */
    public void getConfig(AttributesConfig.Builder builder, FieldSet fs) {
        for (Attribute attribute : attributes.values()) {
            if (isAttributeInFieldSet(attribute, fs)) {
                builder.attribute(getConfig(attribute.getName(), attribute, false));
            }
        }
        if (fs == FieldSet.ALL) {
            for (Map.Entry<String, Attribute> entry : importedAttributes.entrySet()) {
                builder.attribute(getConfig(entry.getKey(), entry.getValue(), true));
            }
        }
    }
}
/**
 * Derived configuration of all attribute fields of a search definition:
 * collects the Attribute instances of each field (directly, or derived from
 * imported fields, arrays/maps of simple structs, or maps of primitives) and
 * exports them as AttributesConfig.
 */
class AttributeFields extends Derived implements AttributesConfig.Producer {

    /** Which subset of attributes to export to config. */
    public enum FieldSet {ALL, FAST_ACCESS}

    // Attribute name -> attribute, in derivation (insertion) order.
    private Map<String, Attribute> attributes = new java.util.LinkedHashMap<>();

    // Imported field name -> its attribute; only the first attribute per field is kept.
    private Map<String, Attribute> importedAttributes = new java.util.LinkedHashMap<>();

    /** Whether this has any position attribute */
    private boolean hasPosition = false;

    public AttributeFields(Search search) {
        derive(search);
    }

    /** Derives everything from a field */
    @Override
    protected void derive(ImmutableSDField field, Search search) {
        if (unsupportedFieldType(field)) {
            return; // silently skip unsupported struct/map shapes
        }
        if (field.isImportedField()) {
            deriveImportedAttributes(field);
        } else if (isArrayOfSimpleStruct(field)) {
            deriveArrayOfSimpleStruct(field);
        } else if (isMapOfSimpleStruct(field)) {
            deriveMapOfSimpleStruct(field);
        } else if (isMapOfPrimitiveType(field)) {
            deriveMapOfPrimitiveType(field);
        } else {
            deriveAttributes(field);
        }
    }

    // True for struct/map fields that are not one of the supported shapes
    // (array of simple struct, map of simple struct/primitive, position types).
    private static boolean unsupportedFieldType(ImmutableSDField field) {
        return (field.usesStructOrMap() &&
                !isArrayOfSimpleStruct(field) &&
                !isMapOfSimpleStruct(field) &&
                !isMapOfPrimitiveType(field) &&
                !field.getDataType().equals(PositionDataType.INSTANCE) &&
                !field.getDataType().equals(DataType.getArray(PositionDataType.INSTANCE)));
    }

    /** Returns an attribute by name, or null if it doesn't exist */
    public Attribute getAttribute(String attributeName) {
        return attributes.get(attributeName);
    }

    public boolean containsAttribute(String attributeName) {
        return getAttribute(attributeName) != null;
    }

    /** Derives one attribute. TODO: Support non-default named attributes */
    private void deriveAttributes(ImmutableSDField field) {
        for (Attribute fieldAttribute : field.getAttributes().values()) {
            deriveAttribute(field, fieldAttribute);
        }
        if (field.containsExpression(ToPositionExpression.class)) {
            // Only one set of position attributes is supported
            if (hasPosition) {
                throw new IllegalArgumentException("Can not specify more than one set of position attributes per field: " + field.getName());
            }
            hasPosition = true;
        }
    }

    private void deriveAttribute(ImmutableSDField field, Attribute fieldAttribute) {
        Attribute attribute = getAttribute(fieldAttribute.getName());
        if (attribute == null) {
            // First time this attribute name is seen: register it
            attributes.put(fieldAttribute.getName(), fieldAttribute);
            attribute = getAttribute(fieldAttribute.getName());
        }
        Ranking ranking = field.getRanking();
        if (ranking != null && ranking.isFilter()) {
            // Filter fields are switched to bit-vector-only form
            attribute.setEnableBitVectors(true);
            attribute.setEnableOnlyBitVector(true);
        }
    }

    // NOTE(review): keyed by the field name but guarded inside the loop, so only
    // the first attribute of the field is ever stored — confirm this is intentional.
    private void deriveImportedAttributes(ImmutableSDField field) {
        for (Attribute attribute : field.getAttributes().values()) {
            if (!importedAttributes.containsKey(field.getName())) {
                importedAttributes.put(field.getName(), attribute);
            }
        }
    }

    private void deriveArrayOfSimpleStruct(ImmutableSDField field) {
        for (ImmutableSDField structField : field.getStructFields()) {
            deriveAttributeAsArrayType(structField);
        }
    }

    // Registers the field's attribute (if any) converted to an array attribute.
    private void deriveAttributeAsArrayType(ImmutableSDField field) {
        Attribute attribute = field.getAttributes().get(field.getName());
        if (attribute != null) {
            attributes.put(attribute.getName(), attribute.convertToArray());
        }
    }

    private void deriveMapOfSimpleStruct(ImmutableSDField field) {
        deriveAttributeAsArrayType(field.getStructField("key"));
        deriveMapValueField(field.getStructField("value"));
    }

    private void deriveMapValueField(ImmutableSDField valueField) {
        for (ImmutableSDField structField : valueField.getStructFields()) {
            deriveAttributeAsArrayType(structField);
        }
    }

    private void deriveMapOfPrimitiveType(ImmutableSDField field) {
        deriveAttributeAsArrayType(field.getStructField("key"));
        deriveAttributeAsArrayType(field.getStructField("value"));
    }

    /** Returns a read only attribute iterator */
    public Iterator attributeIterator() {
        return attributes().iterator();
    }

    /** Returns an unmodifiable view of all derived attributes. */
    public Collection<Attribute> attributes() {
        return Collections.unmodifiableCollection(attributes.values());
    }

    public String toString() {
        return "attributes " + getName();
    }

    @Override
    protected String getDerivedName() {
        return "attributes";
    }

    // NOTE(review): appears unused within this class — candidate for removal.
    private Map<String, AttributesConfig.Attribute.Builder> toMap(List<AttributesConfig.Attribute.Builder> ls) {
        Map<String, AttributesConfig.Attribute.Builder> ret = new LinkedHashMap<>();
        for (AttributesConfig.Attribute.Builder builder : ls) {
            ret.put((String) ConfigInstanceUtil.getField(builder, "name"), builder);
        }
        return ret;
    }

    @Override
    public void getConfig(AttributesConfig.Builder builder) {
        getConfig(builder, FieldSet.ALL);
    }

    private boolean isAttributeInFieldSet(Attribute attribute, FieldSet fs) {
        return (fs == FieldSet.ALL) || ((fs == FieldSet.FAST_ACCESS) && attribute.isFastAccess());
    }

    // Builds the config entry for a single attribute; boolean flags are only set when non-default.
    private AttributesConfig.Attribute.Builder getConfig(String attrName, Attribute attribute, boolean imported) {
        AttributesConfig.Attribute.Builder aaB = new AttributesConfig.Attribute.Builder()
                .name(attrName)
                .datatype(AttributesConfig.Attribute.Datatype.Enum.valueOf(attribute.getType().getExportAttributeTypeName()))
                .collectiontype(AttributesConfig.Attribute.Collectiontype.Enum.valueOf(attribute.getCollectionType().getName()));
        if (attribute.isRemoveIfZero()) {
            aaB.removeifzero(true);
        }
        if (attribute.isCreateIfNonExistent()) {
            aaB.createifnonexistent(true);
        }
        aaB.enablebitvectors(attribute.isEnabledBitVectors());
        aaB.enableonlybitvector(attribute.isEnabledOnlyBitVector());
        if (attribute.isFastSearch()) {
            aaB.fastsearch(true);
        }
        if (attribute.isFastAccess()) {
            aaB.fastaccess(true);
        }
        if (attribute.isHuge()) {
            aaB.huge(true);
        }
        if (attribute.getSorting().isDescending()) {
            aaB.sortascending(false);
        }
        if (attribute.getSorting().getFunction() != Sorting.Function.UCA) {
            aaB.sortfunction(AttributesConfig.Attribute.Sortfunction.Enum.valueOf(attribute.getSorting().getFunction().toString()));
        }
        if (attribute.getSorting().getStrength() != Sorting.Strength.PRIMARY) {
            aaB.sortstrength(AttributesConfig.Attribute.Sortstrength.Enum.valueOf(attribute.getSorting().getStrength().toString()));
        }
        if (!attribute.getSorting().getLocale().isEmpty()) {
            aaB.sortlocale(attribute.getSorting().getLocale());
        }
        aaB.arity(attribute.arity());
        aaB.lowerbound(attribute.lowerBound());
        aaB.upperbound(attribute.upperBound());
        aaB.densepostinglistthreshold(attribute.densePostingListThreshold());
        if (attribute.tensorType().isPresent()) {
            aaB.tensortype(attribute.tensorType().get().toString());
        }
        aaB.imported(imported);
        return aaB;
    }

    /** Writes config for all attributes in the given field set; imported attributes are included only for ALL. */
    public void getConfig(AttributesConfig.Builder builder, FieldSet fs) {
        for (Attribute attribute : attributes.values()) {
            if (isAttributeInFieldSet(attribute, fs)) {
                builder.attribute(getConfig(attribute.getName(), attribute, false));
            }
        }
        if (fs == FieldSet.ALL) {
            for (Map.Entry<String, Attribute> entry : importedAttributes.entrySet()) {
                builder.attribute(getConfig(entry.getKey(), entry.getValue(), true));
            }
        }
    }
}
Fixed
public Collection<Attribute> structFieldAttributes(String baseFieldName) { return attributes().stream() .filter(attribute -> attribute.getName().startsWith(baseFieldName + ".")) .collect(Collectors.toList()); }
.filter(attribute -> attribute.getName().startsWith(baseFieldName + "."))
public Collection<Attribute> structFieldAttributes(String baseFieldName) { String structPrefix = baseFieldName + "."; return attributes().stream() .filter(attribute -> attribute.getName().startsWith(structPrefix)) .collect(Collectors.toList()); }
/**
 * Derived configuration of all attribute fields of a search definition:
 * collects the Attribute instances of each field (directly, or derived from
 * imported fields, arrays/maps of simple structs, or maps of primitives) and
 * exports them as AttributesConfig.
 */
class AttributeFields extends Derived implements AttributesConfig.Producer {

    /** Which subset of attributes to export to config. */
    public enum FieldSet {ALL, FAST_ACCESS}

    // Attribute name -> attribute, in derivation (insertion) order.
    private Map<String, Attribute> attributes = new java.util.LinkedHashMap<>();

    // Imported field name -> its attribute; only the first attribute per field is kept.
    private Map<String, Attribute> importedAttributes = new java.util.LinkedHashMap<>();

    /** Whether this has any position attribute */
    private boolean hasPosition = false;

    public AttributeFields(Search search) {
        derive(search);
    }

    /** Derives everything from a field */
    @Override
    protected void derive(ImmutableSDField field, Search search) {
        if (unsupportedFieldType(field)) {
            return; // silently skip unsupported struct/map shapes
        }
        if (field.isImportedField()) {
            deriveImportedAttributes(field);
        } else if (isArrayOfSimpleStruct(field)) {
            deriveArrayOfSimpleStruct(field);
        } else if (isMapOfSimpleStruct(field)) {
            deriveMapOfSimpleStruct(field);
        } else if (isMapOfPrimitiveType(field)) {
            deriveMapOfPrimitiveType(field);
        } else {
            deriveAttributes(field);
        }
    }

    // True for struct/map fields that are not one of the supported shapes
    // (array of simple struct, map of simple struct/primitive, position types).
    private static boolean unsupportedFieldType(ImmutableSDField field) {
        return (field.usesStructOrMap() &&
                !isArrayOfSimpleStruct(field) &&
                !isMapOfSimpleStruct(field) &&
                !isMapOfPrimitiveType(field) &&
                !field.getDataType().equals(PositionDataType.INSTANCE) &&
                !field.getDataType().equals(DataType.getArray(PositionDataType.INSTANCE)));
    }

    /** Returns an attribute by name, or null if it doesn't exist */
    public Attribute getAttribute(String attributeName) {
        return attributes.get(attributeName);
    }

    public boolean containsAttribute(String attributeName) {
        return getAttribute(attributeName) != null;
    }

    /** Derives one attribute. TODO: Support non-default named attributes */
    private void deriveAttributes(ImmutableSDField field) {
        for (Attribute fieldAttribute : field.getAttributes().values()) {
            deriveAttribute(field, fieldAttribute);
        }
        if (field.containsExpression(ToPositionExpression.class)) {
            // Only one set of position attributes is supported
            if (hasPosition) {
                throw new IllegalArgumentException("Can not specify more than one set of position attributes per field: " + field.getName());
            }
            hasPosition = true;
        }
    }

    private void deriveAttribute(ImmutableSDField field, Attribute fieldAttribute) {
        Attribute attribute = getAttribute(fieldAttribute.getName());
        if (attribute == null) {
            // First time this attribute name is seen: register it
            attributes.put(fieldAttribute.getName(), fieldAttribute);
            attribute = getAttribute(fieldAttribute.getName());
        }
        Ranking ranking = field.getRanking();
        if (ranking != null && ranking.isFilter()) {
            // Filter fields are switched to bit-vector-only form
            attribute.setEnableBitVectors(true);
            attribute.setEnableOnlyBitVector(true);
        }
    }

    // NOTE(review): keyed by the field name but guarded inside the loop, so only
    // the first attribute of the field is ever stored — confirm this is intentional.
    private void deriveImportedAttributes(ImmutableSDField field) {
        for (Attribute attribute : field.getAttributes().values()) {
            if (!importedAttributes.containsKey(field.getName())) {
                importedAttributes.put(field.getName(), attribute);
            }
        }
    }

    private void deriveArrayOfSimpleStruct(ImmutableSDField field) {
        for (ImmutableSDField structField : field.getStructFields()) {
            deriveAttributeAsArrayType(structField);
        }
    }

    // Registers the field's attribute (if any) converted to an array attribute.
    private void deriveAttributeAsArrayType(ImmutableSDField field) {
        Attribute attribute = field.getAttributes().get(field.getName());
        if (attribute != null) {
            attributes.put(attribute.getName(), attribute.convertToArray());
        }
    }

    private void deriveMapOfSimpleStruct(ImmutableSDField field) {
        deriveAttributeAsArrayType(field.getStructField("key"));
        deriveMapValueField(field.getStructField("value"));
    }

    private void deriveMapValueField(ImmutableSDField valueField) {
        for (ImmutableSDField structField : valueField.getStructFields()) {
            deriveAttributeAsArrayType(structField);
        }
    }

    private void deriveMapOfPrimitiveType(ImmutableSDField field) {
        deriveAttributeAsArrayType(field.getStructField("key"));
        deriveAttributeAsArrayType(field.getStructField("value"));
    }

    /** Returns a read only attribute iterator */
    public Iterator attributeIterator() {
        return attributes().iterator();
    }

    /** Returns an unmodifiable view of all derived attributes. */
    public Collection<Attribute> attributes() {
        return Collections.unmodifiableCollection(attributes.values());
    }

    public String toString() {
        return "attributes " + getName();
    }

    @Override
    protected String getDerivedName() {
        return "attributes";
    }

    // NOTE(review): appears unused within this class — candidate for removal.
    private Map<String, AttributesConfig.Attribute.Builder> toMap(List<AttributesConfig.Attribute.Builder> ls) {
        Map<String, AttributesConfig.Attribute.Builder> ret = new LinkedHashMap<>();
        for (AttributesConfig.Attribute.Builder builder : ls) {
            ret.put((String) ConfigInstanceUtil.getField(builder, "name"), builder);
        }
        return ret;
    }

    @Override
    public void getConfig(AttributesConfig.Builder builder) {
        getConfig(builder, FieldSet.ALL);
    }

    private boolean isAttributeInFieldSet(Attribute attribute, FieldSet fs) {
        return (fs == FieldSet.ALL) || ((fs == FieldSet.FAST_ACCESS) && attribute.isFastAccess());
    }

    // Builds the config entry for a single attribute; boolean flags are only set when non-default.
    private AttributesConfig.Attribute.Builder getConfig(String attrName, Attribute attribute, boolean imported) {
        AttributesConfig.Attribute.Builder aaB = new AttributesConfig.Attribute.Builder()
                .name(attrName)
                .datatype(AttributesConfig.Attribute.Datatype.Enum.valueOf(attribute.getType().getExportAttributeTypeName()))
                .collectiontype(AttributesConfig.Attribute.Collectiontype.Enum.valueOf(attribute.getCollectionType().getName()));
        if (attribute.isRemoveIfZero()) {
            aaB.removeifzero(true);
        }
        if (attribute.isCreateIfNonExistent()) {
            aaB.createifnonexistent(true);
        }
        aaB.enablebitvectors(attribute.isEnabledBitVectors());
        aaB.enableonlybitvector(attribute.isEnabledOnlyBitVector());
        if (attribute.isFastSearch()) {
            aaB.fastsearch(true);
        }
        if (attribute.isFastAccess()) {
            aaB.fastaccess(true);
        }
        if (attribute.isHuge()) {
            aaB.huge(true);
        }
        if (attribute.getSorting().isDescending()) {
            aaB.sortascending(false);
        }
        if (attribute.getSorting().getFunction() != Sorting.Function.UCA) {
            aaB.sortfunction(AttributesConfig.Attribute.Sortfunction.Enum.valueOf(attribute.getSorting().getFunction().toString()));
        }
        if (attribute.getSorting().getStrength() != Sorting.Strength.PRIMARY) {
            aaB.sortstrength(AttributesConfig.Attribute.Sortstrength.Enum.valueOf(attribute.getSorting().getStrength().toString()));
        }
        if (!attribute.getSorting().getLocale().isEmpty()) {
            aaB.sortlocale(attribute.getSorting().getLocale());
        }
        aaB.arity(attribute.arity());
        aaB.lowerbound(attribute.lowerBound());
        aaB.upperbound(attribute.upperBound());
        aaB.densepostinglistthreshold(attribute.densePostingListThreshold());
        if (attribute.tensorType().isPresent()) {
            aaB.tensortype(attribute.tensorType().get().toString());
        }
        aaB.imported(imported);
        return aaB;
    }

    /** Writes config for all attributes in the given field set; imported attributes are included only for ALL. */
    public void getConfig(AttributesConfig.Builder builder, FieldSet fs) {
        for (Attribute attribute : attributes.values()) {
            if (isAttributeInFieldSet(attribute, fs)) {
                builder.attribute(getConfig(attribute.getName(), attribute, false));
            }
        }
        if (fs == FieldSet.ALL) {
            for (Map.Entry<String, Attribute> entry : importedAttributes.entrySet()) {
                builder.attribute(getConfig(entry.getKey(), entry.getValue(), true));
            }
        }
    }
}
/**
 * Derived configuration of all attribute fields of a search definition:
 * collects the Attribute instances of each field (directly, or derived from
 * imported fields, arrays/maps of simple structs, or maps of primitives) and
 * exports them as AttributesConfig.
 */
class AttributeFields extends Derived implements AttributesConfig.Producer {

    /** Which subset of attributes to export to config. */
    public enum FieldSet {ALL, FAST_ACCESS}

    // Attribute name -> attribute, in derivation (insertion) order.
    private Map<String, Attribute> attributes = new java.util.LinkedHashMap<>();

    // Imported field name -> its attribute; only the first attribute per field is kept.
    private Map<String, Attribute> importedAttributes = new java.util.LinkedHashMap<>();

    /** Whether this has any position attribute */
    private boolean hasPosition = false;

    public AttributeFields(Search search) {
        derive(search);
    }

    /** Derives everything from a field */
    @Override
    protected void derive(ImmutableSDField field, Search search) {
        if (unsupportedFieldType(field)) {
            return; // silently skip unsupported struct/map shapes
        }
        if (field.isImportedField()) {
            deriveImportedAttributes(field);
        } else if (isArrayOfSimpleStruct(field)) {
            deriveArrayOfSimpleStruct(field);
        } else if (isMapOfSimpleStruct(field)) {
            deriveMapOfSimpleStruct(field);
        } else if (isMapOfPrimitiveType(field)) {
            deriveMapOfPrimitiveType(field);
        } else {
            deriveAttributes(field);
        }
    }

    // True for struct/map fields that are not one of the supported shapes
    // (array of simple struct, map of simple struct/primitive, position types).
    private static boolean unsupportedFieldType(ImmutableSDField field) {
        return (field.usesStructOrMap() &&
                !isArrayOfSimpleStruct(field) &&
                !isMapOfSimpleStruct(field) &&
                !isMapOfPrimitiveType(field) &&
                !field.getDataType().equals(PositionDataType.INSTANCE) &&
                !field.getDataType().equals(DataType.getArray(PositionDataType.INSTANCE)));
    }

    /** Returns an attribute by name, or null if it doesn't exist */
    public Attribute getAttribute(String attributeName) {
        return attributes.get(attributeName);
    }

    public boolean containsAttribute(String attributeName) {
        return getAttribute(attributeName) != null;
    }

    /** Derives one attribute. TODO: Support non-default named attributes */
    private void deriveAttributes(ImmutableSDField field) {
        for (Attribute fieldAttribute : field.getAttributes().values()) {
            deriveAttribute(field, fieldAttribute);
        }
        if (field.containsExpression(ToPositionExpression.class)) {
            // Only one set of position attributes is supported
            if (hasPosition) {
                throw new IllegalArgumentException("Can not specify more than one set of position attributes per field: " + field.getName());
            }
            hasPosition = true;
        }
    }

    private void deriveAttribute(ImmutableSDField field, Attribute fieldAttribute) {
        Attribute attribute = getAttribute(fieldAttribute.getName());
        if (attribute == null) {
            // First time this attribute name is seen: register it
            attributes.put(fieldAttribute.getName(), fieldAttribute);
            attribute = getAttribute(fieldAttribute.getName());
        }
        Ranking ranking = field.getRanking();
        if (ranking != null && ranking.isFilter()) {
            // Filter fields are switched to bit-vector-only form
            attribute.setEnableBitVectors(true);
            attribute.setEnableOnlyBitVector(true);
        }
    }

    // NOTE(review): keyed by the field name but guarded inside the loop, so only
    // the first attribute of the field is ever stored — confirm this is intentional.
    private void deriveImportedAttributes(ImmutableSDField field) {
        for (Attribute attribute : field.getAttributes().values()) {
            if (!importedAttributes.containsKey(field.getName())) {
                importedAttributes.put(field.getName(), attribute);
            }
        }
    }

    private void deriveArrayOfSimpleStruct(ImmutableSDField field) {
        for (ImmutableSDField structField : field.getStructFields()) {
            deriveAttributeAsArrayType(structField);
        }
    }

    // Registers the field's attribute (if any) converted to an array attribute.
    private void deriveAttributeAsArrayType(ImmutableSDField field) {
        Attribute attribute = field.getAttributes().get(field.getName());
        if (attribute != null) {
            attributes.put(attribute.getName(), attribute.convertToArray());
        }
    }

    private void deriveMapOfSimpleStruct(ImmutableSDField field) {
        deriveAttributeAsArrayType(field.getStructField("key"));
        deriveMapValueField(field.getStructField("value"));
    }

    private void deriveMapValueField(ImmutableSDField valueField) {
        for (ImmutableSDField structField : valueField.getStructFields()) {
            deriveAttributeAsArrayType(structField);
        }
    }

    private void deriveMapOfPrimitiveType(ImmutableSDField field) {
        deriveAttributeAsArrayType(field.getStructField("key"));
        deriveAttributeAsArrayType(field.getStructField("value"));
    }

    /** Returns a read only attribute iterator */
    public Iterator attributeIterator() {
        return attributes().iterator();
    }

    /** Returns an unmodifiable view of all derived attributes. */
    public Collection<Attribute> attributes() {
        return Collections.unmodifiableCollection(attributes.values());
    }

    public String toString() {
        return "attributes " + getName();
    }

    @Override
    protected String getDerivedName() {
        return "attributes";
    }

    // NOTE(review): appears unused within this class — candidate for removal.
    private Map<String, AttributesConfig.Attribute.Builder> toMap(List<AttributesConfig.Attribute.Builder> ls) {
        Map<String, AttributesConfig.Attribute.Builder> ret = new LinkedHashMap<>();
        for (AttributesConfig.Attribute.Builder builder : ls) {
            ret.put((String) ConfigInstanceUtil.getField(builder, "name"), builder);
        }
        return ret;
    }

    @Override
    public void getConfig(AttributesConfig.Builder builder) {
        getConfig(builder, FieldSet.ALL);
    }

    private boolean isAttributeInFieldSet(Attribute attribute, FieldSet fs) {
        return (fs == FieldSet.ALL) || ((fs == FieldSet.FAST_ACCESS) && attribute.isFastAccess());
    }

    // Builds the config entry for a single attribute; boolean flags are only set when non-default.
    private AttributesConfig.Attribute.Builder getConfig(String attrName, Attribute attribute, boolean imported) {
        AttributesConfig.Attribute.Builder aaB = new AttributesConfig.Attribute.Builder()
                .name(attrName)
                .datatype(AttributesConfig.Attribute.Datatype.Enum.valueOf(attribute.getType().getExportAttributeTypeName()))
                .collectiontype(AttributesConfig.Attribute.Collectiontype.Enum.valueOf(attribute.getCollectionType().getName()));
        if (attribute.isRemoveIfZero()) {
            aaB.removeifzero(true);
        }
        if (attribute.isCreateIfNonExistent()) {
            aaB.createifnonexistent(true);
        }
        aaB.enablebitvectors(attribute.isEnabledBitVectors());
        aaB.enableonlybitvector(attribute.isEnabledOnlyBitVector());
        if (attribute.isFastSearch()) {
            aaB.fastsearch(true);
        }
        if (attribute.isFastAccess()) {
            aaB.fastaccess(true);
        }
        if (attribute.isHuge()) {
            aaB.huge(true);
        }
        if (attribute.getSorting().isDescending()) {
            aaB.sortascending(false);
        }
        if (attribute.getSorting().getFunction() != Sorting.Function.UCA) {
            aaB.sortfunction(AttributesConfig.Attribute.Sortfunction.Enum.valueOf(attribute.getSorting().getFunction().toString()));
        }
        if (attribute.getSorting().getStrength() != Sorting.Strength.PRIMARY) {
            aaB.sortstrength(AttributesConfig.Attribute.Sortstrength.Enum.valueOf(attribute.getSorting().getStrength().toString()));
        }
        if (!attribute.getSorting().getLocale().isEmpty()) {
            aaB.sortlocale(attribute.getSorting().getLocale());
        }
        aaB.arity(attribute.arity());
        aaB.lowerbound(attribute.lowerBound());
        aaB.upperbound(attribute.upperBound());
        aaB.densepostinglistthreshold(attribute.densePostingListThreshold());
        if (attribute.tensorType().isPresent()) {
            aaB.tensortype(attribute.tensorType().get().toString());
        }
        aaB.imported(imported);
        return aaB;
    }

    /** Writes config for all attributes in the given field set; imported attributes are included only for ALL. */
    public void getConfig(AttributesConfig.Builder builder, FieldSet fs) {
        for (Attribute attribute : attributes.values()) {
            if (isAttributeInFieldSet(attribute, fs)) {
                builder.attribute(getConfig(attribute.getName(), attribute, false));
            }
        }
        if (fs == FieldSet.ALL) {
            for (Map.Entry<String, Attribute> entry : importedAttributes.entrySet()) {
                builder.attribute(getConfig(entry.getKey(), entry.getValue(), true));
            }
        }
    }
}
Why not just use try-with-resources?
/**
 * Performs a single health probe against the endpoint's /state/v1/health URL.
 *
 * <p>Returns a {@code HealthInfo} derived from the HTTP status code (for non-2xx
 * responses) or from the parsed health status code in the JSON body. A missing or
 * incomplete status in the body falls back to {@code DEFAULT_STATUS}.
 *
 * @throws IllegalStateException    if the HTTP client has not been started or has been closed
 * @throws IllegalArgumentException if the response body exceeds MAX_CONTENT_LENGTH
 * @throws Exception                on I/O or JSON parsing failures (caller maps these to HealthInfo)
 */
private HealthInfo probeHealth() throws Exception {
    HttpGet httpget = new HttpGet(endpoint.getStateV1HealthUrl().toString());
    // Snapshot the field: it may be swapped by a concurrent credentials update.
    CloseableHttpClient httpClient = this.httpClient;
    if (httpClient == null) {
        throw new IllegalStateException("HTTP client never started or has closed");
    }
    // CloseableHttpResponse is AutoCloseable — try-with-resources replaces the manual
    // try/finally and guarantees the response is closed on every exit path.
    try (CloseableHttpResponse httpResponse = httpClient.execute(httpget)) {
        int httpStatusCode = httpResponse.getStatusLine().getStatusCode();
        if (httpStatusCode < 200 || httpStatusCode >= 300) {
            return HealthInfo.fromBadHttpStatusCode(httpStatusCode);
        }
        HttpEntity bodyEntity = httpResponse.getEntity();
        // Guard against pathologically large bodies before reading the content.
        long contentLength = bodyEntity.getContentLength();
        if (contentLength > MAX_CONTENT_LENGTH) {
            throw new IllegalArgumentException("Content too long: " + contentLength + " bytes");
        }
        String body = getContentFunction.apply(bodyEntity);
        HealthResponse healthResponse = mapper.readValue(body, HealthResponse.class);
        if (healthResponse.status == null || healthResponse.status.code == null) {
            return HealthInfo.fromHealthStatusCode(HealthResponse.Status.DEFAULT_STATUS);
        } else {
            return HealthInfo.fromHealthStatusCode(healthResponse.status.code);
        }
    }
}
CloseableHttpResponse httpResponse = httpClient.execute(httpget);
/**
 * Performs a single health probe against the endpoint's /state/v1/health URL.
 *
 * <p>Non-2xx HTTP responses map to {@code HealthInfo.fromBadHttpStatusCode}; otherwise
 * the JSON body is parsed and its status code used. A missing status falls back to
 * {@code HealthResponse.Status.DEFAULT_STATUS}.
 *
 * @throws IllegalStateException    if the HTTP client has not been started or has been closed
 * @throws IllegalArgumentException if the response body exceeds MAX_CONTENT_LENGTH
 * @throws Exception                on I/O or JSON parsing failures (caller maps these to HealthInfo)
 */
private HealthInfo probeHealth() throws Exception {
    HttpGet httpget = new HttpGet(endpoint.getStateV1HealthUrl().toString());
    // Snapshot the field: it may be swapped by a concurrent credentials update.
    CloseableHttpClient httpClient = this.httpClient;
    if (httpClient == null) {
        throw new IllegalStateException("HTTP client never started or has closed");
    }
    // try-with-resources guarantees the response is closed on every exit path.
    try (CloseableHttpResponse httpResponse = httpClient.execute(httpget)) {
        int httpStatusCode = httpResponse.getStatusLine().getStatusCode();
        if (httpStatusCode < 200 || httpStatusCode >= 300) {
            return HealthInfo.fromBadHttpStatusCode(httpStatusCode);
        }
        HttpEntity bodyEntity = httpResponse.getEntity();
        // Guard against pathologically large bodies before reading the content.
        long contentLength = bodyEntity.getContentLength();
        if (contentLength > MAX_CONTENT_LENGTH) {
            throw new IllegalArgumentException("Content too long: " + contentLength + " bytes");
        }
        String body = getContentFunction.apply(bodyEntity);
        HealthResponse healthResponse = mapper.readValue(body, HealthResponse.class);
        if (healthResponse.status == null || healthResponse.status.code == null) {
            return HealthInfo.fromHealthStatusCode(HealthResponse.Status.DEFAULT_STATUS);
        } else {
            return HealthInfo.fromHealthStatusCode(healthResponse.status.code);
        }
    }
}
class HealthClient implements AutoCloseable, ServiceIdentityProvider.Listener { private static final ObjectMapper mapper = new ObjectMapper(); private static final long MAX_CONTENT_LENGTH = 1L << 20; private static final int DEFAULT_TIMEOUT_MILLIS = 1_000; private static final ConnectionKeepAliveStrategy KEEP_ALIVE_STRATEGY = new DefaultConnectionKeepAliveStrategy() { @Override public long getKeepAliveDuration(HttpResponse response, HttpContext context) { long keepAlive = super.getKeepAliveDuration(response, context); if (keepAlive == -1) { keepAlive = 60000; } return keepAlive; } }; private final HealthEndpoint endpoint; private final Supplier<CloseableHttpClient> clientSupplier; private final Function<HttpEntity, String> getContentFunction; private CloseableHttpClient httpClient = null; public HealthClient(HealthEndpoint endpoint) { this(endpoint, () -> makeCloseableHttpClient(endpoint), entity -> uncheck(() -> EntityUtils.toString(entity))); } /** For testing. */ HealthClient(HealthEndpoint endpoint, Supplier<CloseableHttpClient> clientSupplier, Function<HttpEntity, String> getContentFunction) { this.endpoint = endpoint; this.clientSupplier = clientSupplier; this.getContentFunction = getContentFunction; } public void start() { updateHttpClient(); endpoint.registerListener(this); } @Override public void onCredentialsUpdate(SSLContext sslContext, AthenzService ignored) { updateHttpClient(); } public HealthEndpoint getEndpoint() { return endpoint; } public HealthInfo getHealthInfo() { try { return probeHealth(); } catch (Exception e) { return HealthInfo.fromException(e); } } @Override public void close() { endpoint.removeListener(this); if (httpClient != null) { try { httpClient.close(); } catch (Exception e) { } httpClient = null; } } private void updateHttpClient() { CloseableHttpClient httpClient = clientSupplier.get(); if (this.httpClient != null) { uncheck(() -> this.httpClient.close()); } this.httpClient = httpClient; } private static CloseableHttpClient 
makeCloseableHttpClient(HealthEndpoint endpoint) { Registry<ConnectionSocketFactory> registry = RegistryBuilder.<ConnectionSocketFactory>create() .register(endpoint.getStateV1HealthUrl().getProtocol(), endpoint.getConnectionSocketFactory()) .build(); HttpClientConnectionManager connectionManager = new BasicHttpClientConnectionManager(registry); RequestConfig requestConfig = RequestConfig.custom() .setConnectTimeout(DEFAULT_TIMEOUT_MILLIS) .setConnectionRequestTimeout(DEFAULT_TIMEOUT_MILLIS) .setSocketTimeout(DEFAULT_TIMEOUT_MILLIS) .build(); return HttpClients.custom() .setKeepAliveStrategy(KEEP_ALIVE_STRATEGY) .setConnectionManager(connectionManager) .disableAutomaticRetries() .setDefaultRequestConfig(requestConfig) .build(); } }
class HealthClient implements AutoCloseable, ServiceIdentityProvider.Listener { private static final ObjectMapper mapper = new ObjectMapper(); private static final long MAX_CONTENT_LENGTH = 1L << 20; private static final int DEFAULT_TIMEOUT_MILLIS = 1_000; private static final ConnectionKeepAliveStrategy KEEP_ALIVE_STRATEGY = new DefaultConnectionKeepAliveStrategy() { @Override public long getKeepAliveDuration(HttpResponse response, HttpContext context) { long keepAlive = super.getKeepAliveDuration(response, context); if (keepAlive == -1) { keepAlive = 60000; } return keepAlive; } }; private final HealthEndpoint endpoint; private final Supplier<CloseableHttpClient> clientSupplier; private final Function<HttpEntity, String> getContentFunction; private CloseableHttpClient httpClient = null; public HealthClient(HealthEndpoint endpoint) { this(endpoint, () -> makeCloseableHttpClient(endpoint), entity -> uncheck(() -> EntityUtils.toString(entity))); } /** For testing. */ HealthClient(HealthEndpoint endpoint, Supplier<CloseableHttpClient> clientSupplier, Function<HttpEntity, String> getContentFunction) { this.endpoint = endpoint; this.clientSupplier = clientSupplier; this.getContentFunction = getContentFunction; } public void start() { updateHttpClient(); endpoint.registerListener(this); } @Override public void onCredentialsUpdate(SSLContext sslContext, AthenzService ignored) { updateHttpClient(); } public HealthEndpoint getEndpoint() { return endpoint; } public HealthInfo getHealthInfo() { try { return probeHealth(); } catch (Exception e) { return HealthInfo.fromException(e); } } @Override public void close() { endpoint.removeListener(this); if (httpClient != null) { try { httpClient.close(); } catch (Exception e) { } httpClient = null; } } private void updateHttpClient() { CloseableHttpClient httpClient = clientSupplier.get(); if (this.httpClient != null) { uncheck(() -> this.httpClient.close()); } this.httpClient = httpClient; } private static CloseableHttpClient 
makeCloseableHttpClient(HealthEndpoint endpoint) { Registry<ConnectionSocketFactory> registry = RegistryBuilder.<ConnectionSocketFactory>create() .register(endpoint.getStateV1HealthUrl().getProtocol(), endpoint.getConnectionSocketFactory()) .build(); HttpClientConnectionManager connectionManager = new BasicHttpClientConnectionManager(registry); RequestConfig requestConfig = RequestConfig.custom() .setConnectTimeout(DEFAULT_TIMEOUT_MILLIS) .setConnectionRequestTimeout(DEFAULT_TIMEOUT_MILLIS) .setSocketTimeout(DEFAULT_TIMEOUT_MILLIS) .build(); return HttpClients.custom() .setKeepAliveStrategy(KEEP_ALIVE_STRATEGY) .setConnectionManager(connectionManager) .disableAutomaticRetries() .setDefaultRequestConfig(requestConfig) .build(); } }
Fixed — replaced the manual try/finally with try-with-resources.
/**
 * Performs a single health probe against the endpoint's /state/v1/health URL.
 *
 * <p>Returns a {@code HealthInfo} derived from the HTTP status code (for non-2xx
 * responses) or from the parsed health status code in the JSON body. A missing or
 * incomplete status in the body falls back to {@code DEFAULT_STATUS}.
 *
 * @throws IllegalStateException    if the HTTP client has not been started or has been closed
 * @throws IllegalArgumentException if the response body exceeds MAX_CONTENT_LENGTH
 * @throws Exception                on I/O or JSON parsing failures (caller maps these to HealthInfo)
 */
private HealthInfo probeHealth() throws Exception {
    HttpGet httpget = new HttpGet(endpoint.getStateV1HealthUrl().toString());
    // Snapshot the field: it may be swapped by a concurrent credentials update.
    CloseableHttpClient httpClient = this.httpClient;
    if (httpClient == null) {
        throw new IllegalStateException("HTTP client never started or has closed");
    }
    // CloseableHttpResponse is AutoCloseable — try-with-resources replaces the manual
    // try/finally and guarantees the response is closed on every exit path.
    try (CloseableHttpResponse httpResponse = httpClient.execute(httpget)) {
        int httpStatusCode = httpResponse.getStatusLine().getStatusCode();
        if (httpStatusCode < 200 || httpStatusCode >= 300) {
            return HealthInfo.fromBadHttpStatusCode(httpStatusCode);
        }
        HttpEntity bodyEntity = httpResponse.getEntity();
        // Guard against pathologically large bodies before reading the content.
        long contentLength = bodyEntity.getContentLength();
        if (contentLength > MAX_CONTENT_LENGTH) {
            throw new IllegalArgumentException("Content too long: " + contentLength + " bytes");
        }
        String body = getContentFunction.apply(bodyEntity);
        HealthResponse healthResponse = mapper.readValue(body, HealthResponse.class);
        if (healthResponse.status == null || healthResponse.status.code == null) {
            return HealthInfo.fromHealthStatusCode(HealthResponse.Status.DEFAULT_STATUS);
        } else {
            return HealthInfo.fromHealthStatusCode(healthResponse.status.code);
        }
    }
}
CloseableHttpResponse httpResponse = httpClient.execute(httpget);
/**
 * Performs a single health probe against the endpoint's /state/v1/health URL.
 *
 * <p>Non-2xx HTTP responses map to {@code HealthInfo.fromBadHttpStatusCode}; otherwise
 * the JSON body is parsed and its status code used. A missing status falls back to
 * {@code HealthResponse.Status.DEFAULT_STATUS}.
 *
 * @throws IllegalStateException    if the HTTP client has not been started or has been closed
 * @throws IllegalArgumentException if the response body exceeds MAX_CONTENT_LENGTH
 * @throws Exception                on I/O or JSON parsing failures (caller maps these to HealthInfo)
 */
private HealthInfo probeHealth() throws Exception {
    HttpGet httpget = new HttpGet(endpoint.getStateV1HealthUrl().toString());
    // Snapshot the field: it may be swapped by a concurrent credentials update.
    CloseableHttpClient httpClient = this.httpClient;
    if (httpClient == null) {
        throw new IllegalStateException("HTTP client never started or has closed");
    }
    // try-with-resources guarantees the response is closed on every exit path.
    try (CloseableHttpResponse httpResponse = httpClient.execute(httpget)) {
        int httpStatusCode = httpResponse.getStatusLine().getStatusCode();
        if (httpStatusCode < 200 || httpStatusCode >= 300) {
            return HealthInfo.fromBadHttpStatusCode(httpStatusCode);
        }
        HttpEntity bodyEntity = httpResponse.getEntity();
        // Guard against pathologically large bodies before reading the content.
        long contentLength = bodyEntity.getContentLength();
        if (contentLength > MAX_CONTENT_LENGTH) {
            throw new IllegalArgumentException("Content too long: " + contentLength + " bytes");
        }
        String body = getContentFunction.apply(bodyEntity);
        HealthResponse healthResponse = mapper.readValue(body, HealthResponse.class);
        if (healthResponse.status == null || healthResponse.status.code == null) {
            return HealthInfo.fromHealthStatusCode(HealthResponse.Status.DEFAULT_STATUS);
        } else {
            return HealthInfo.fromHealthStatusCode(healthResponse.status.code);
        }
    }
}
class HealthClient implements AutoCloseable, ServiceIdentityProvider.Listener { private static final ObjectMapper mapper = new ObjectMapper(); private static final long MAX_CONTENT_LENGTH = 1L << 20; private static final int DEFAULT_TIMEOUT_MILLIS = 1_000; private static final ConnectionKeepAliveStrategy KEEP_ALIVE_STRATEGY = new DefaultConnectionKeepAliveStrategy() { @Override public long getKeepAliveDuration(HttpResponse response, HttpContext context) { long keepAlive = super.getKeepAliveDuration(response, context); if (keepAlive == -1) { keepAlive = 60000; } return keepAlive; } }; private final HealthEndpoint endpoint; private final Supplier<CloseableHttpClient> clientSupplier; private final Function<HttpEntity, String> getContentFunction; private CloseableHttpClient httpClient = null; public HealthClient(HealthEndpoint endpoint) { this(endpoint, () -> makeCloseableHttpClient(endpoint), entity -> uncheck(() -> EntityUtils.toString(entity))); } /** For testing. */ HealthClient(HealthEndpoint endpoint, Supplier<CloseableHttpClient> clientSupplier, Function<HttpEntity, String> getContentFunction) { this.endpoint = endpoint; this.clientSupplier = clientSupplier; this.getContentFunction = getContentFunction; } public void start() { updateHttpClient(); endpoint.registerListener(this); } @Override public void onCredentialsUpdate(SSLContext sslContext, AthenzService ignored) { updateHttpClient(); } public HealthEndpoint getEndpoint() { return endpoint; } public HealthInfo getHealthInfo() { try { return probeHealth(); } catch (Exception e) { return HealthInfo.fromException(e); } } @Override public void close() { endpoint.removeListener(this); if (httpClient != null) { try { httpClient.close(); } catch (Exception e) { } httpClient = null; } } private void updateHttpClient() { CloseableHttpClient httpClient = clientSupplier.get(); if (this.httpClient != null) { uncheck(() -> this.httpClient.close()); } this.httpClient = httpClient; } private static CloseableHttpClient 
makeCloseableHttpClient(HealthEndpoint endpoint) { Registry<ConnectionSocketFactory> registry = RegistryBuilder.<ConnectionSocketFactory>create() .register(endpoint.getStateV1HealthUrl().getProtocol(), endpoint.getConnectionSocketFactory()) .build(); HttpClientConnectionManager connectionManager = new BasicHttpClientConnectionManager(registry); RequestConfig requestConfig = RequestConfig.custom() .setConnectTimeout(DEFAULT_TIMEOUT_MILLIS) .setConnectionRequestTimeout(DEFAULT_TIMEOUT_MILLIS) .setSocketTimeout(DEFAULT_TIMEOUT_MILLIS) .build(); return HttpClients.custom() .setKeepAliveStrategy(KEEP_ALIVE_STRATEGY) .setConnectionManager(connectionManager) .disableAutomaticRetries() .setDefaultRequestConfig(requestConfig) .build(); } }
class HealthClient implements AutoCloseable, ServiceIdentityProvider.Listener { private static final ObjectMapper mapper = new ObjectMapper(); private static final long MAX_CONTENT_LENGTH = 1L << 20; private static final int DEFAULT_TIMEOUT_MILLIS = 1_000; private static final ConnectionKeepAliveStrategy KEEP_ALIVE_STRATEGY = new DefaultConnectionKeepAliveStrategy() { @Override public long getKeepAliveDuration(HttpResponse response, HttpContext context) { long keepAlive = super.getKeepAliveDuration(response, context); if (keepAlive == -1) { keepAlive = 60000; } return keepAlive; } }; private final HealthEndpoint endpoint; private final Supplier<CloseableHttpClient> clientSupplier; private final Function<HttpEntity, String> getContentFunction; private CloseableHttpClient httpClient = null; public HealthClient(HealthEndpoint endpoint) { this(endpoint, () -> makeCloseableHttpClient(endpoint), entity -> uncheck(() -> EntityUtils.toString(entity))); } /** For testing. */ HealthClient(HealthEndpoint endpoint, Supplier<CloseableHttpClient> clientSupplier, Function<HttpEntity, String> getContentFunction) { this.endpoint = endpoint; this.clientSupplier = clientSupplier; this.getContentFunction = getContentFunction; } public void start() { updateHttpClient(); endpoint.registerListener(this); } @Override public void onCredentialsUpdate(SSLContext sslContext, AthenzService ignored) { updateHttpClient(); } public HealthEndpoint getEndpoint() { return endpoint; } public HealthInfo getHealthInfo() { try { return probeHealth(); } catch (Exception e) { return HealthInfo.fromException(e); } } @Override public void close() { endpoint.removeListener(this); if (httpClient != null) { try { httpClient.close(); } catch (Exception e) { } httpClient = null; } } private void updateHttpClient() { CloseableHttpClient httpClient = clientSupplier.get(); if (this.httpClient != null) { uncheck(() -> this.httpClient.close()); } this.httpClient = httpClient; } private static CloseableHttpClient 
makeCloseableHttpClient(HealthEndpoint endpoint) { Registry<ConnectionSocketFactory> registry = RegistryBuilder.<ConnectionSocketFactory>create() .register(endpoint.getStateV1HealthUrl().getProtocol(), endpoint.getConnectionSocketFactory()) .build(); HttpClientConnectionManager connectionManager = new BasicHttpClientConnectionManager(registry); RequestConfig requestConfig = RequestConfig.custom() .setConnectTimeout(DEFAULT_TIMEOUT_MILLIS) .setConnectionRequestTimeout(DEFAULT_TIMEOUT_MILLIS) .setSocketTimeout(DEFAULT_TIMEOUT_MILLIS) .build(); return HttpClients.custom() .setKeepAliveStrategy(KEEP_ALIVE_STRATEGY) .setConnectionManager(connectionManager) .disableAutomaticRetries() .setDefaultRequestConfig(requestConfig) .build(); } }
Consider calling drain() here instead of a single unwrap, to avoid returning only one frame even when multiple frames have already been buffered and could be decoded.
/**
 * Reads decrypted application data into {@code dst}.
 *
 * <p>First drains all frames already buffered from previous channel reads; only when
 * nothing is buffered does it read more ciphertext from the channel and drain again.
 * Using drain() instead of a single unwrap avoids returning just one frame when
 * several decodable frames are already available.
 *
 * @return number of bytes written to dst, or 0 if no data is available yet
 * @throws IOException on channel or TLS errors, including detected renegotiation
 */
public int read(ByteBuffer dst) throws IOException {
    verifyHandshakeCompleted();
    // Drain everything already buffered, not just one frame.
    int bytesUnwrapped = drain(dst);
    if (bytesUnwrapped > 0) return bytesUnwrapped;
    int bytesRead = channelRead();
    if (bytesRead == 0) return 0;
    return drain(dst);
}
int bytesUnwrapped = applicationDataUnwrap(dst);
/**
 * Reads decrypted application data into {@code dst}.
 *
 * <p>First drains all frames already buffered from previous channel reads; only when
 * nothing is buffered does it read more ciphertext from the channel and drain again.
 *
 * @return number of bytes written to dst, or 0 if no data is available yet
 * @throws IOException on channel or TLS errors, including detected renegotiation
 */
public int read(ByteBuffer dst) throws IOException {
    verifyHandshakeCompleted();
    // Drain everything already buffered, not just one frame.
    int bytesUnwrapped = drain(dst);
    if (bytesUnwrapped > 0) return bytesUnwrapped;
    int bytesRead = channelRead();
    if (bytesRead == 0) return 0;
    return drain(dst);
}
class TlsCryptoSocket implements CryptoSocket { private static final ByteBuffer NULL_BUFFER = ByteBuffer.allocate(0); private static final Logger log = Logger.getLogger(TlsCryptoSocket.class.getName()); private enum HandshakeState { NOT_STARTED, NEED_READ, NEED_WRITE, COMPLETED } private final SocketChannel channel; private final SSLEngine sslEngine; private final Buffer wrapBuffer; private final Buffer unwrapBuffer; private int sessionPacketBufferSize; private int sessionApplicationBufferSize; private ByteBuffer handshakeDummyBuffer; private HandshakeState handshakeState; public TlsCryptoSocket(SocketChannel channel, SSLEngine sslEngine) { this.channel = channel; this.sslEngine = sslEngine; SSLSession nullSession = sslEngine.getSession(); this.wrapBuffer = new Buffer(nullSession.getPacketBufferSize() * 2); this.unwrapBuffer = new Buffer(nullSession.getPacketBufferSize() * 2); this.handshakeDummyBuffer = ByteBuffer.allocate(nullSession.getApplicationBufferSize()); this.handshakeState = HandshakeState.NOT_STARTED; } @Override public SocketChannel channel() { return channel; } @Override public HandshakeResult handshake() throws IOException { HandshakeState newHandshakeState = processHandshakeState(this.handshakeState); log.fine(() -> String.format("Handshake state '%s -> %s'", this.handshakeState, newHandshakeState)); this.handshakeState = newHandshakeState; return toHandshakeResult(newHandshakeState); } private HandshakeState processHandshakeState(HandshakeState state) throws IOException { switch (state) { case NOT_STARTED: sslEngine.beginHandshake(); break; case NEED_WRITE: channelWrite(); break; case NEED_READ: channelRead(); break; case COMPLETED: return HandshakeState.COMPLETED; default: throw unhandledStateException(state); } while (true) { switch (sslEngine.getHandshakeStatus()) { case NOT_HANDSHAKING: if (wrapBuffer.bytes() > 0) return HandshakeState.NEED_WRITE; sslEngine.setEnableSessionCreation(false); handshakeDummyBuffer = null; SSLSession session = 
sslEngine.getSession(); sessionApplicationBufferSize = session.getApplicationBufferSize(); sessionPacketBufferSize = session.getPacketBufferSize(); return HandshakeState.COMPLETED; case NEED_TASK: sslEngine.getDelegatedTask().run(); break; case NEED_UNWRAP: if (wrapBuffer.bytes() > 0) return HandshakeState.NEED_WRITE; if (!handshakeUnwrap()) return HandshakeState.NEED_READ; break; case NEED_WRAP: if (!handshakeWrap()) return HandshakeState.NEED_WRITE; break; default: throw new IllegalStateException("Unexpected handshake status: " + sslEngine.getHandshakeStatus()); } } } private static HandshakeResult toHandshakeResult(HandshakeState state) { switch (state) { case NEED_READ: return HandshakeResult.NEED_READ; case NEED_WRITE: return HandshakeResult.NEED_WRITE; case COMPLETED: return HandshakeResult.DONE; default: throw unhandledStateException(state); } } @Override public int getMinimumReadBufferSize() { return sessionApplicationBufferSize; } @Override @Override public int drain(ByteBuffer dst) throws IOException { verifyHandshakeCompleted(); int totalBytesUnwrapped = 0; int bytesUnwrapped; do { bytesUnwrapped = applicationDataUnwrap(dst); totalBytesUnwrapped += bytesUnwrapped; } while (bytesUnwrapped > 0); return totalBytesUnwrapped; } @Override public int write(ByteBuffer src) throws IOException { if (flush() == FlushResult.NEED_WRITE) return 0; int totalBytesWrapped = 0; int bytesWrapped; do { bytesWrapped = applicationDataWrap(src); totalBytesWrapped += bytesWrapped; } while (bytesWrapped > 0 && wrapBuffer.bytes() < sessionPacketBufferSize); return totalBytesWrapped; } @Override public FlushResult flush() throws IOException { channelWrite(); return wrapBuffer.bytes() > 0 ? 
FlushResult.NEED_WRITE : FlushResult.DONE; } private boolean handshakeWrap() throws IOException { SSLEngineResult result = sslEngineWrap(NULL_BUFFER); switch (result.getStatus()) { case OK: return true; case BUFFER_OVERFLOW: return false; default: throw unexpectedStatusException(result.getStatus()); } } private int applicationDataWrap(ByteBuffer src) throws IOException { SSLEngineResult result = sslEngineWrap(src); if (result.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) throw new SSLException("Renegotiation detected"); switch (result.getStatus()) { case OK: return result.bytesConsumed(); case BUFFER_OVERFLOW: return 0; default: throw unexpectedStatusException(result.getStatus()); } } private SSLEngineResult sslEngineWrap(ByteBuffer src) throws IOException { SSLEngineResult result = sslEngine.wrap(src, wrapBuffer.getWritable(sessionPacketBufferSize)); if (result.getStatus() == Status.CLOSED) throw new ClosedChannelException(); return result; } private boolean handshakeUnwrap() throws IOException { SSLEngineResult result = sslEngineUnwrap(handshakeDummyBuffer); switch (result.getStatus()) { case OK: if (result.bytesProduced() > 0) throw new SSLException("Got application data in handshake unwrap"); return true; case BUFFER_UNDERFLOW: return false; default: throw unexpectedStatusException(result.getStatus()); } } private int applicationDataUnwrap(ByteBuffer dst) throws IOException { SSLEngineResult result = sslEngineUnwrap(dst); if (result.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) throw new SSLException("Renegotiation detected"); switch (result.getStatus()) { case OK: return result.bytesProduced(); case BUFFER_OVERFLOW: case BUFFER_UNDERFLOW: return 0; default: throw unexpectedStatusException(result.getStatus()); } } private SSLEngineResult sslEngineUnwrap(ByteBuffer dst) throws IOException { SSLEngineResult result = sslEngine.unwrap(unwrapBuffer.getReadable(), dst); if (result.getStatus() == Status.CLOSED) throw new ClosedChannelException(); 
return result; } private int channelRead() throws IOException { int read = channel.read(unwrapBuffer.getWritable(sessionPacketBufferSize)); if (read == -1) throw new ClosedChannelException(); return read; } private int channelWrite() throws IOException { return channel.write(wrapBuffer.getReadable()); } private static IllegalStateException unhandledStateException(HandshakeState state) { return new IllegalStateException("Unhandled state: " + state); } private static IllegalStateException unexpectedStatusException(Status status) { return new IllegalStateException("Unexpected status: " + status); } private void verifyHandshakeCompleted() throws SSLException { if (handshakeState != HandshakeState.COMPLETED) throw new SSLException("Handshake not completed: handshakeState=" + handshakeState); } }
class TlsCryptoSocket implements CryptoSocket { private static final ByteBuffer NULL_BUFFER = ByteBuffer.allocate(0); private static final Logger log = Logger.getLogger(TlsCryptoSocket.class.getName()); private enum HandshakeState { NOT_STARTED, NEED_READ, NEED_WRITE, COMPLETED } private final SocketChannel channel; private final SSLEngine sslEngine; private final Buffer wrapBuffer; private final Buffer unwrapBuffer; private int sessionPacketBufferSize; private int sessionApplicationBufferSize; private ByteBuffer handshakeDummyBuffer; private HandshakeState handshakeState; public TlsCryptoSocket(SocketChannel channel, SSLEngine sslEngine) { this.channel = channel; this.sslEngine = sslEngine; SSLSession nullSession = sslEngine.getSession(); this.wrapBuffer = new Buffer(nullSession.getPacketBufferSize() * 2); this.unwrapBuffer = new Buffer(nullSession.getPacketBufferSize() * 2); this.handshakeDummyBuffer = ByteBuffer.allocate(nullSession.getApplicationBufferSize()); this.handshakeState = HandshakeState.NOT_STARTED; } @Override public SocketChannel channel() { return channel; } @Override public HandshakeResult handshake() throws IOException { HandshakeState newHandshakeState = processHandshakeState(this.handshakeState); log.fine(() -> String.format("Handshake state '%s -> %s'", this.handshakeState, newHandshakeState)); this.handshakeState = newHandshakeState; return toHandshakeResult(newHandshakeState); } private HandshakeState processHandshakeState(HandshakeState state) throws IOException { switch (state) { case NOT_STARTED: sslEngine.beginHandshake(); break; case NEED_WRITE: channelWrite(); break; case NEED_READ: channelRead(); break; case COMPLETED: return HandshakeState.COMPLETED; default: throw unhandledStateException(state); } while (true) { switch (sslEngine.getHandshakeStatus()) { case NOT_HANDSHAKING: if (wrapBuffer.bytes() > 0) return HandshakeState.NEED_WRITE; sslEngine.setEnableSessionCreation(false); handshakeDummyBuffer = null; SSLSession session = 
sslEngine.getSession(); sessionApplicationBufferSize = session.getApplicationBufferSize(); sessionPacketBufferSize = session.getPacketBufferSize(); return HandshakeState.COMPLETED; case NEED_TASK: sslEngine.getDelegatedTask().run(); break; case NEED_UNWRAP: if (wrapBuffer.bytes() > 0) return HandshakeState.NEED_WRITE; if (!handshakeUnwrap()) return HandshakeState.NEED_READ; break; case NEED_WRAP: if (!handshakeWrap()) return HandshakeState.NEED_WRITE; break; default: throw new IllegalStateException("Unexpected handshake status: " + sslEngine.getHandshakeStatus()); } } } private static HandshakeResult toHandshakeResult(HandshakeState state) { switch (state) { case NEED_READ: return HandshakeResult.NEED_READ; case NEED_WRITE: return HandshakeResult.NEED_WRITE; case COMPLETED: return HandshakeResult.DONE; default: throw unhandledStateException(state); } } @Override public int getMinimumReadBufferSize() { return sessionApplicationBufferSize; } @Override @Override public int drain(ByteBuffer dst) throws IOException { verifyHandshakeCompleted(); int totalBytesUnwrapped = 0; int bytesUnwrapped; do { bytesUnwrapped = applicationDataUnwrap(dst); totalBytesUnwrapped += bytesUnwrapped; } while (bytesUnwrapped > 0); return totalBytesUnwrapped; } @Override public int write(ByteBuffer src) throws IOException { if (flush() == FlushResult.NEED_WRITE) return 0; int totalBytesWrapped = 0; int bytesWrapped; do { bytesWrapped = applicationDataWrap(src); totalBytesWrapped += bytesWrapped; } while (bytesWrapped > 0 && wrapBuffer.bytes() < sessionPacketBufferSize); return totalBytesWrapped; } @Override public FlushResult flush() throws IOException { channelWrite(); return wrapBuffer.bytes() > 0 ? 
FlushResult.NEED_WRITE : FlushResult.DONE; } private boolean handshakeWrap() throws IOException { SSLEngineResult result = sslEngineWrap(NULL_BUFFER); switch (result.getStatus()) { case OK: return true; case BUFFER_OVERFLOW: return false; default: throw unexpectedStatusException(result.getStatus()); } } private int applicationDataWrap(ByteBuffer src) throws IOException { SSLEngineResult result = sslEngineWrap(src); if (result.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) throw new SSLException("Renegotiation detected"); switch (result.getStatus()) { case OK: return result.bytesConsumed(); case BUFFER_OVERFLOW: return 0; default: throw unexpectedStatusException(result.getStatus()); } } private SSLEngineResult sslEngineWrap(ByteBuffer src) throws IOException { SSLEngineResult result = sslEngine.wrap(src, wrapBuffer.getWritable(sessionPacketBufferSize)); if (result.getStatus() == Status.CLOSED) throw new ClosedChannelException(); return result; } private boolean handshakeUnwrap() throws IOException { SSLEngineResult result = sslEngineUnwrap(handshakeDummyBuffer); switch (result.getStatus()) { case OK: if (result.bytesProduced() > 0) throw new SSLException("Got application data in handshake unwrap"); return true; case BUFFER_UNDERFLOW: return false; default: throw unexpectedStatusException(result.getStatus()); } } private int applicationDataUnwrap(ByteBuffer dst) throws IOException { SSLEngineResult result = sslEngineUnwrap(dst); if (result.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING) throw new SSLException("Renegotiation detected"); switch (result.getStatus()) { case OK: return result.bytesProduced(); case BUFFER_OVERFLOW: case BUFFER_UNDERFLOW: return 0; default: throw unexpectedStatusException(result.getStatus()); } } private SSLEngineResult sslEngineUnwrap(ByteBuffer dst) throws IOException { SSLEngineResult result = sslEngine.unwrap(unwrapBuffer.getReadable(), dst); if (result.getStatus() == Status.CLOSED) throw new ClosedChannelException(); 
return result; } private int channelRead() throws IOException { int read = channel.read(unwrapBuffer.getWritable(sessionPacketBufferSize)); if (read == -1) throw new ClosedChannelException(); return read; } private int channelWrite() throws IOException { return channel.write(wrapBuffer.getReadable()); } private static IllegalStateException unhandledStateException(HandshakeState state) { return new IllegalStateException("Unhandled state: " + state); } private static IllegalStateException unexpectedStatusException(Status status) { return new IllegalStateException("Unexpected status: " + status); } private void verifyHandshakeCompleted() throws SSLException { if (handshakeState != HandshakeState.COMPLETED) throw new SSLException("Handshake not completed: handshakeState=" + handshakeState); } }
Remember to add a `registerInstance` overload that does not require the callers to pass `null` and `false` explicitly.
/**
 * Registers this instance with ZTS and returns the resulting Athenz credentials.
 *
 * <p>Generates a fresh RSA key pair, fetches a signed identity document for this host,
 * builds a CSR for the tenant identity, and exchanges it with ZTS for an instance
 * certificate.
 */
AthenzCredentials registerInstance() {
    KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA);
    IdentityDocumentClient identityDocumentClient = createIdentityDocumentClient(identityConfig, nodeIdentityProvider);
    SignedIdentityDocument document = identityDocumentClient.getTenantIdentityDocument(hostname);
    AthenzService tenantIdentity = new AthenzService(identityConfig.domain(), identityConfig.service());
    Pkcs10Csr csr = instanceCsrGenerator.generateCsr(
            tenantIdentity, document.providerUniqueId(), document.ipAddresses(), keyPair);
    try (ZtsClient ztsClient = new DefaultZtsClient(URI.create(identityConfig.ztsUrl()), nodeIdentityProvider)) {
        InstanceIdentity instanceIdentity = ztsClient.registerInstance(
                new AthenzService(identityConfig.configserverIdentityName()),
                tenantIdentity,
                // TODO(review): add a registerInstance overload that does not require
                // these literal null/false arguments — presumably an unused instance id
                // and a flag; confirm against the ZtsClient API.
                null,
                EntityBindingsMapper.toAttestationData(document),
                false,
                csr);
        return toAthenzCredentials(instanceIdentity, keyPair, document);
    }
}
false,
/**
 * Registers this instance with ZTS and returns the resulting Athenz credentials.
 *
 * <p>Returns credentials cached on disk when available; otherwise generates a fresh
 * RSA key pair, fetches a signed identity document for this host, exchanges a CSR
 * with ZTS for an instance certificate, and persists the result to disk.
 */
AthenzCredentials registerInstance() {
    // Fast path: reuse previously persisted credentials if they are still usable.
    Optional<AthenzCredentials> athenzCredentialsFromDisk = tryReadCredentialsFromDisk();
    if (athenzCredentialsFromDisk.isPresent()) {
        return athenzCredentialsFromDisk.get();
    }
    KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA);
    IdentityDocumentClient identityDocumentClient = createIdentityDocumentClient();
    SignedIdentityDocument document = identityDocumentClient.getTenantIdentityDocument(hostname);
    Pkcs10Csr csr = instanceCsrGenerator.generateCsr(
            tenantIdentity, document.providerUniqueId(), document.ipAddresses(), keyPair);
    try (ZtsClient ztsClient = new DefaultZtsClient(ztsEndpoint, nodeIdentityProvider)) {
        InstanceIdentity instanceIdentity = ztsClient.registerInstance(
                configserverIdentity,
                tenantIdentity,
                // NOTE(review): literal null/false arguments — an overload without them
                // would read better; confirm their meaning against the ZtsClient API.
                null,
                EntityBindingsMapper.toAttestationData(document),
                false,
                csr);
        X509Certificate certificate = instanceIdentity.certificate();
        SSLContext identitySslContext = createIdentitySslContext(keyPair.getPrivate(), certificate);
        // Persist so a restart can take the fast path above.
        writeCredentialsToDisk(keyPair.getPrivate(), certificate, document);
        return new AthenzCredentials(certificate, keyPair, document, identitySslContext);
    }
}
// Obtains and refreshes Athenz x509 credentials for a tenant service identity.
// NOTE(review): original single-line formatting kept verbatim; only comments added.
// updateCredentials(): generates a fresh RSA key pair + CSR and refreshes the instance
// via ZTS using the caller-supplied SSLContext, returning new AthenzCredentials.
// toAthenzCredentials()/createIdentitySslContext(): wrap the issued certificate and key
// in an SSLContext trusted via the JKS trust store.
class AthenzCredentialsService { private final IdentityConfig identityConfig; private final ServiceIdentityProvider nodeIdentityProvider; private final File trustStoreJks; private final String hostname; private final InstanceCsrGenerator instanceCsrGenerator; AthenzCredentialsService(IdentityConfig identityConfig, ServiceIdentityProvider nodeIdentityProvider, File trustStoreJks, String hostname) { this.identityConfig = identityConfig; this.nodeIdentityProvider = nodeIdentityProvider; this.trustStoreJks = trustStoreJks; this.hostname = hostname; this.instanceCsrGenerator = new InstanceCsrGenerator(identityConfig.athenzDnsSuffix()); } AthenzCredentials updateCredentials(SignedIdentityDocument document, SSLContext sslContext) { AthenzService tenantIdentity = new AthenzService(identityConfig.domain(), identityConfig.service()); KeyPair newKeyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA); Pkcs10Csr csr = instanceCsrGenerator.generateCsr( tenantIdentity, document.providerUniqueId(), document.ipAddresses(), newKeyPair); try (ZtsClient ztsClient = new DefaultZtsClient(URI.create(identityConfig.ztsUrl()), tenantIdentity, sslContext)) { InstanceIdentity instanceIdentity = ztsClient.refreshInstance( new AthenzService(identityConfig.configserverIdentityName()), tenantIdentity, document.providerUniqueId().asDottedString(), false, csr); return toAthenzCredentials(instanceIdentity, newKeyPair, document); } } private AthenzCredentials toAthenzCredentials(InstanceIdentity instanceIdentity, KeyPair keyPair, SignedIdentityDocument identityDocument) { X509Certificate certificate = instanceIdentity.certificate(); SSLContext identitySslContext = createIdentitySslContext(keyPair.getPrivate(), certificate); return new AthenzCredentials(certificate, keyPair, identityDocument, identitySslContext); } private SSLContext createIdentitySslContext(PrivateKey privateKey, X509Certificate certificate) { return new SslContextBuilder() .withKeyStore(privateKey, certificate) 
// createIdentityDocumentClient(): talks to the config server's load-balancer endpoint,
// verifying the configserver's Athenz identity on the TLS handshake.
.withTrustStore(trustStoreJks, JKS) .build(); } private static DefaultIdentityDocumentClient createIdentityDocumentClient(IdentityConfig config, ServiceIdentityProvider nodeIdentityProvider) { return new DefaultIdentityDocumentClient( URI.create(config.loadBalancerAddress()), nodeIdentityProvider, new AthenzIdentityVerifier(singleton(new AthenzService(config.configserverIdentityName())))); } }
// Variant of AthenzCredentialsService that additionally persists credentials on disk
// (under var/vespa/sia) and reuses them on later invocations if the certificate is not
// within EXPIRATION_MARGIN (2 days) of its notAfter time, judged against the injected Clock.
// NOTE(review): original single-line formatting kept verbatim; only comments added.
class AthenzCredentialsService { private static final Duration EXPIRATION_MARGIN = Duration.ofDays(2); private static final Path VESPA_SIA_DIRECTORY = Paths.get(Defaults.getDefaults().underVespaHome("var/vespa/sia")); private static final Path IDENTITY_DOCUMENT_FILE = VESPA_SIA_DIRECTORY.resolve("vespa-tenant-identity-document.json"); private final AthenzService tenantIdentity; private final URI configserverEndpoint; private final URI ztsEndpoint; private final AthenzService configserverIdentity; private final ServiceIdentityProvider nodeIdentityProvider; private final File trustStoreJks; private final String hostname; private final InstanceCsrGenerator instanceCsrGenerator; private final Clock clock; AthenzCredentialsService(IdentityConfig identityConfig, ServiceIdentityProvider nodeIdentityProvider, File trustStoreJks, String hostname, Clock clock) { this.tenantIdentity = new AthenzService(identityConfig.domain(), identityConfig.service()); this.configserverEndpoint = URI.create(identityConfig.loadBalancerAddress()); this.ztsEndpoint = URI.create(identityConfig.ztsUrl()); this.configserverIdentity = new AthenzService(identityConfig.configserverIdentityName()); this.nodeIdentityProvider = nodeIdentityProvider; this.trustStoreJks = trustStoreJks; this.hostname = hostname; this.instanceCsrGenerator = new InstanceCsrGenerator(identityConfig.athenzDnsSuffix()); this.clock = clock; } AthenzCredentials updateCredentials(SignedIdentityDocument document, SSLContext sslContext) { KeyPair newKeyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA); Pkcs10Csr csr = instanceCsrGenerator.generateCsr( tenantIdentity, document.providerUniqueId(), document.ipAddresses(), newKeyPair); try (ZtsClient ztsClient = new DefaultZtsClient(ztsEndpoint, tenantIdentity, sslContext)) { InstanceIdentity instanceIdentity = ztsClient.refreshInstance( configserverIdentity, tenantIdentity, document.providerUniqueId().asDottedString(), false, csr); X509Certificate certificate = 
// tryReadCredentialsFromDisk(): cached credentials are used only when the private key,
// a non-expired certificate AND the identity document file are all present on disk.
// writeCredentialsToDisk(): persists key, certificate and signed identity document so a
// restart can skip re-registration.
instanceIdentity.certificate(); SSLContext identitySslContext = createIdentitySslContext(newKeyPair.getPrivate(), certificate); writeCredentialsToDisk(newKeyPair.getPrivate(), certificate, document); return new AthenzCredentials(certificate, newKeyPair, document, identitySslContext); } } private Optional<AthenzCredentials> tryReadCredentialsFromDisk() { Optional<PrivateKey> privateKey = SiaUtils.readPrivateKeyFile(VESPA_SIA_DIRECTORY, tenantIdentity); if (!privateKey.isPresent()) return Optional.empty(); Optional<X509Certificate> certificate = SiaUtils.readCertificateFile(VESPA_SIA_DIRECTORY, tenantIdentity); if (!certificate.isPresent()) return Optional.empty(); if (isExpired(certificate.get())) { return Optional.empty(); } if (Files.notExists(IDENTITY_DOCUMENT_FILE)) return Optional.empty(); SignedIdentityDocument signedIdentityDocument = EntityBindingsMapper.readSignedIdentityDocumentFromFile(IDENTITY_DOCUMENT_FILE); KeyPair keyPair = new KeyPair(KeyUtils.extractPublicKey(privateKey.get()), privateKey.get()); SSLContext sslContext = createIdentitySslContext(privateKey.get(), certificate.get()); return Optional.of(new AthenzCredentials(certificate.get(), keyPair, signedIdentityDocument, sslContext)); } private boolean isExpired(X509Certificate certificate) { return clock.instant().isAfter(certificate.getNotAfter().toInstant().minus(EXPIRATION_MARGIN)); } private void writeCredentialsToDisk(PrivateKey privateKey, X509Certificate certificate, SignedIdentityDocument identityDocument) { SiaUtils.writePrivateKeyFile(VESPA_SIA_DIRECTORY, tenantIdentity, privateKey); SiaUtils.writeCertificateFile(VESPA_SIA_DIRECTORY, tenantIdentity, certificate); EntityBindingsMapper.writeSignedIdentityDocumentToFile(IDENTITY_DOCUMENT_FILE, identityDocument); } private SSLContext createIdentitySslContext(PrivateKey privateKey, X509Certificate certificate) { return new SslContextBuilder() .withKeyStore(privateKey, certificate) .withTrustStore(trustStoreJks, JKS) .build(); } private 
// createIdentityDocumentClient(): bound to the configserver endpoint, verifying the
// configserver's Athenz identity on the TLS handshake.
DefaultIdentityDocumentClient createIdentityDocumentClient() { return new DefaultIdentityDocumentClient( configserverEndpoint, nodeIdentityProvider, new AthenzIdentityVerifier(singleton(configserverIdentity))); } }
`SetNodeStatesForClusterRequest` does not actually override `RemoteClusterControllerTask.hasVersionAckDependency()` (default `false`) today, but it makes sense to have this here anyway for symmetry.
/**
 * Reports whether this request failed: either the base request failed, or a result
 * was set that did not actually modify anything.
 */
public boolean isFailed() {
    if (super.isFailed()) {
        return true;
    }
    // Preserve short-circuit: result is only consulted once a result has been set.
    return resultSet && !result.getWasModified();
}
}
/**
 * A request counts as failed when the base request failed, or when a result was set
 * whose operation did not modify anything.
 */
public boolean isFailed() {
    if (super.isFailed()) return true;
    if (!resultSet) return false;
    // Result is set but nothing was modified -> treat as failure.
    return !result.getWasModified();
}
class SetNodeStatesForClusterRequest extends Request<SetResponse> { private static final Logger log = Logger.getLogger(SetNodeStateRequest.class.getName()); private final Id.Cluster cluster; private final Map<String, UnitState> newStates; private final SetUnitStateRequest.Condition condition; public SetNodeStatesForClusterRequest(Id.Cluster cluster, SetUnitStateRequest request) { super(MasterState.MUST_BE_MASTER); this.cluster = cluster; this.newStates = request.getNewState(); this.condition = request.getCondition(); } @Override public SetResponse calculateResult(RemoteClusterControllerTask.Context context) throws StateRestApiException { if (condition != SetUnitStateRequest.Condition.FORCE) { throw new InvalidContentException( "Setting all nodes in a cluster to a state is only supported with FORCE"); } for (ConfiguredNode configuredNode : context.cluster.getConfiguredNodes().values()) { Node node = new Node(NodeType.STORAGE, configuredNode.index()); SetResponse setResponse = SetNodeStateRequest.setWantedState( context.cluster, condition, newStates, node, context.nodeStateOrHostInfoChangeHandler, context.currentConsolidatedState); if (!setResponse.getWasModified()) { throw new InternalFailure("We have not yet implemented the meaning of " + "failing to set the wanted state for a subset of nodes: " + "condition = " + condition + ", newStates = " + newStates + ", currentConsolidatedState = " + context.currentConsolidatedState); } } return new SetResponse("ok", true); } @Override }
class SetNodeStatesForClusterRequest extends Request<SetResponse> { private static final Logger log = Logger.getLogger(SetNodeStateRequest.class.getName()); private final Id.Cluster cluster; private final Map<String, UnitState> newStates; private final SetUnitStateRequest.Condition condition; public SetNodeStatesForClusterRequest(Id.Cluster cluster, SetUnitStateRequest request) { super(MasterState.MUST_BE_MASTER); this.cluster = cluster; this.newStates = request.getNewState(); this.condition = request.getCondition(); } @Override public SetResponse calculateResult(RemoteClusterControllerTask.Context context) throws StateRestApiException { if (condition != SetUnitStateRequest.Condition.FORCE) { throw new InvalidContentException( "Setting all nodes in a cluster to a state is only supported with FORCE"); } for (ConfiguredNode configuredNode : context.cluster.getConfiguredNodes().values()) { Node node = new Node(NodeType.STORAGE, configuredNode.index()); SetResponse setResponse = SetNodeStateRequest.setWantedState( context.cluster, condition, newStates, node, context.nodeStateOrHostInfoChangeHandler, context.currentConsolidatedState); if (!setResponse.getWasModified()) { throw new InternalFailure("We have not yet implemented the meaning of " + "failing to set the wanted state for a subset of nodes: " + "condition = " + condition + ", newStates = " + newStates + ", currentConsolidatedState = " + context.currentConsolidatedState); } } return new SetResponse("ok", true); } @Override }
Thanks! Removed.
// Metric set reported for content-layer (vds.*) services: datastore size, visitor and
// filestor latencies/queue sizes, ideal-state convergence, and distributor operation
// latency/failure rates.
// NOTE(review): some names repeat (e.g. vds.filestor.alldisks.allthreads.put.sum.count.rate);
// whether repeats dedupe in the LinkedHashSet depends on Metric#equals — confirm.
// Original single-line formatting kept verbatim; only comments added.
private static Set<Metric> getStorageMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.add(new Metric("vds.datastored.alldisks.docs.average")); metrics.add(new Metric("vds.datastored.alldisks.bytes.average")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.max")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.sum")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.count")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.average")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.max")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.sum")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.count")); metrics.add(new 
Metric("vds.filestor.alldisks.averagequeuewait.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.max")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.sum")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.count")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.average")); metrics.add(new Metric("vds.visitor.allthreads.completed.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.completed.sum.rate")); metrics.add(new Metric("vds.visitor.allthreads.created.sum.rate")); metrics.add(new Metric("vds.visitor.allthreads.failed.sum.rate")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.failed.rate")); metrics.add(new 
Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.count")); metrics.add(new 
Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.splitbuckets.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.joinbuckets.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.setbucketstates.count.rate")); metrics.add(new Metric("vds.idealstate.buckets_rechecking.average")); metrics.add(new Metric("vds.idealstate.idealstate_diff.average")); metrics.add(new Metric("vds.idealstate.buckets_toofewcopies.average")); metrics.add(new Metric("vds.idealstate.buckets_toomanycopies.average")); metrics.add(new Metric("vds.idealstate.buckets.average")); metrics.add(new Metric("vds.idealstate.buckets_notrusted.average")); metrics.add(new Metric("vds.idealstate.delete_bucket.done_ok.rate")); metrics.add(new Metric("vds.idealstate.delete_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.delete_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.merge_bucket.done_ok.rate")); metrics.add(new 
Metric("vds.idealstate.merge_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.merge_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.split_bucket.done_ok.rate")); metrics.add(new Metric("vds.idealstate.split_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.split_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.join_bucket.done_ok.rate")); metrics.add(new Metric("vds.idealstate.join_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.join_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.garbage_collection.done_ok.rate")); metrics.add(new Metric("vds.idealstate.garbage_collection.done_failed.rate")); metrics.add(new Metric("vds.idealstate.garbage_collection.pending.average")); metrics.add(new Metric("vds.distributor.puts.sum.latency.max")); metrics.add(new Metric("vds.distributor.puts.sum.latency.sum")); metrics.add(new Metric("vds.distributor.puts.sum.latency.count")); metrics.add(new Metric("vds.distributor.puts.sum.latency.average")); metrics.add(new Metric("vds.distributor.puts.sum.ok.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.test_and_set_failed")); metrics.add(new Metric("vds.distributor.removes.sum.latency.max")); metrics.add(new Metric("vds.distributor.removes.sum.latency.sum")); metrics.add(new Metric("vds.distributor.removes.sum.latency.count")); metrics.add(new Metric("vds.distributor.removes.sum.latency.average")); metrics.add(new Metric("vds.distributor.removes.sum.ok.rate")); metrics.add(new Metric("vds.distributor.removes.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.removes.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.removes.sum.failures.test_and_set_failed")); metrics.add(new Metric("vds.distributor.updates.sum.latency.max")); 
metrics.add(new Metric("vds.distributor.updates.sum.latency.sum")); metrics.add(new Metric("vds.distributor.updates.sum.latency.count")); metrics.add(new Metric("vds.distributor.updates.sum.latency.average")); metrics.add(new Metric("vds.distributor.updates.sum.ok.rate")); metrics.add(new Metric("vds.distributor.updates.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.updates.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.updates.sum.failures.test_and_set_failed")); metrics.add(new Metric("vds.distributor.updates.sum.diverging_timestamp_updates.rate")); metrics.add(new Metric("vds.distributor.removelocations.sum.ok.rate")); metrics.add(new Metric("vds.distributor.removelocations.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.gets.sum.latency.max")); metrics.add(new Metric("vds.distributor.gets.sum.latency.sum")); metrics.add(new Metric("vds.distributor.gets.sum.latency.count")); metrics.add(new Metric("vds.distributor.gets.sum.latency.average")); metrics.add(new Metric("vds.distributor.gets.sum.ok.rate")); metrics.add(new Metric("vds.distributor.gets.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.gets.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.max")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.sum")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.count")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.average")); metrics.add(new Metric("vds.distributor.visitor.sum.ok.rate")); metrics.add(new Metric("vds.distributor.visitor.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.visitor.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.docsstored.average")); metrics.add(new Metric("vds.distributor.bytesstored.average")); metrics.add(new Metric("vds.bouncer.clock_skew_aborts.count")); return metrics; }
metrics.add(new Metric("vds.distributor.visitor.sum.failures.notfound.rate"));
// Metric set reported for content-layer (vds.*) services: datastore size, visitor and
// filestor latencies/queue sizes, ideal-state convergence, and distributor operation
// latency/failure rates. This variant omits vds.distributor.visitor.sum.failures.notfound.rate.
// NOTE(review): some names repeat (e.g. vds.filestor.alldisks.allthreads.put.sum.count.rate);
// whether repeats dedupe in the LinkedHashSet depends on Metric#equals — confirm.
// Original single-line formatting kept verbatim; only comments added.
private static Set<Metric> getStorageMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.add(new Metric("vds.datastored.alldisks.docs.average")); metrics.add(new Metric("vds.datastored.alldisks.bytes.average")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.max")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.sum")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.count")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.average")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.max")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.sum")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.count")); metrics.add(new 
Metric("vds.filestor.alldisks.averagequeuewait.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.max")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.sum")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.count")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.average")); metrics.add(new Metric("vds.visitor.allthreads.completed.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.completed.sum.rate")); metrics.add(new Metric("vds.visitor.allthreads.created.sum.rate")); metrics.add(new Metric("vds.visitor.allthreads.failed.sum.rate")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.failed.rate")); metrics.add(new 
Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.count")); metrics.add(new 
Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.splitbuckets.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.joinbuckets.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.setbucketstates.count.rate")); metrics.add(new Metric("vds.idealstate.buckets_rechecking.average")); metrics.add(new Metric("vds.idealstate.idealstate_diff.average")); metrics.add(new Metric("vds.idealstate.buckets_toofewcopies.average")); metrics.add(new Metric("vds.idealstate.buckets_toomanycopies.average")); metrics.add(new Metric("vds.idealstate.buckets.average")); metrics.add(new Metric("vds.idealstate.buckets_notrusted.average")); metrics.add(new Metric("vds.idealstate.delete_bucket.done_ok.rate")); metrics.add(new Metric("vds.idealstate.delete_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.delete_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.merge_bucket.done_ok.rate")); metrics.add(new 
Metric("vds.idealstate.merge_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.merge_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.split_bucket.done_ok.rate")); metrics.add(new Metric("vds.idealstate.split_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.split_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.join_bucket.done_ok.rate")); metrics.add(new Metric("vds.idealstate.join_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.join_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.garbage_collection.done_ok.rate")); metrics.add(new Metric("vds.idealstate.garbage_collection.done_failed.rate")); metrics.add(new Metric("vds.idealstate.garbage_collection.pending.average")); metrics.add(new Metric("vds.distributor.puts.sum.latency.max")); metrics.add(new Metric("vds.distributor.puts.sum.latency.sum")); metrics.add(new Metric("vds.distributor.puts.sum.latency.count")); metrics.add(new Metric("vds.distributor.puts.sum.latency.average")); metrics.add(new Metric("vds.distributor.puts.sum.ok.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.test_and_set_failed")); metrics.add(new Metric("vds.distributor.removes.sum.latency.max")); metrics.add(new Metric("vds.distributor.removes.sum.latency.sum")); metrics.add(new Metric("vds.distributor.removes.sum.latency.count")); metrics.add(new Metric("vds.distributor.removes.sum.latency.average")); metrics.add(new Metric("vds.distributor.removes.sum.ok.rate")); metrics.add(new Metric("vds.distributor.removes.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.removes.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.removes.sum.failures.test_and_set_failed")); metrics.add(new Metric("vds.distributor.updates.sum.latency.max")); 
metrics.add(new Metric("vds.distributor.updates.sum.latency.sum")); metrics.add(new Metric("vds.distributor.updates.sum.latency.count")); metrics.add(new Metric("vds.distributor.updates.sum.latency.average")); metrics.add(new Metric("vds.distributor.updates.sum.ok.rate")); metrics.add(new Metric("vds.distributor.updates.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.updates.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.updates.sum.failures.test_and_set_failed")); metrics.add(new Metric("vds.distributor.updates.sum.diverging_timestamp_updates.rate")); metrics.add(new Metric("vds.distributor.removelocations.sum.ok.rate")); metrics.add(new Metric("vds.distributor.removelocations.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.gets.sum.latency.max")); metrics.add(new Metric("vds.distributor.gets.sum.latency.sum")); metrics.add(new Metric("vds.distributor.gets.sum.latency.count")); metrics.add(new Metric("vds.distributor.gets.sum.latency.average")); metrics.add(new Metric("vds.distributor.gets.sum.ok.rate")); metrics.add(new Metric("vds.distributor.gets.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.gets.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.max")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.sum")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.count")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.average")); metrics.add(new Metric("vds.distributor.visitor.sum.ok.rate")); metrics.add(new Metric("vds.distributor.visitor.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.docsstored.average")); metrics.add(new Metric("vds.distributor.bytesstored.average")); metrics.add(new Metric("vds.bouncer.clock_skew_aborts.count")); return metrics; }
/**
 * The set of metrics reported for Vespa services, grouped by the service type
 * that emits them. Each {@code getXxxMetrics()} method returns the metrics for
 * one service; {@link #getVespaMetrics()} unions them all.
 *
 * <p>Insertion order is preserved throughout (LinkedHashSet) so the reported
 * metric order matches declaration order.
 */
class VespaMetricSet {

    public static final MetricSet vespaMetricSet = new MetricSet("vespa",
                                                                 getVespaMetrics(),
                                                                 singleton(defaultVespaMetricSet));

    /** Adds one {@link Metric} per given name, in the given order. */
    private static void addMetrics(Set<Metric> metrics, String... names) {
        for (String name : names) {
            metrics.add(new Metric(name));
        }
    }

    /** Returns the union of all per-service metric groups, as an unmodifiable set. */
    private static Set<Metric> getVespaMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.addAll(getSearchNodeMetrics());
        metrics.addAll(getStorageMetrics());
        metrics.addAll(getDocprocMetrics());
        metrics.addAll(getClusterControllerMetrics());
        metrics.addAll(getQrserverMetrics());
        metrics.addAll(getContainerMetrics());
        metrics.addAll(getConfigServerMetrics());
        metrics.addAll(getSentinelMetrics());
        metrics.addAll(getOtherMetrics());
        return Collections.unmodifiableSet(metrics);
    }

    /** Metrics emitted by the config sentinel (process supervisor). */
    private static Set<Metric> getSentinelMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        addMetrics(metrics,
                   "sentinel.restarts.count",
                   "sentinel.totalRestarts.last",
                   "sentinel.uptime.last",
                   "sentinel.running.count",
                   "sentinel.running.last");
        return metrics;
    }

    /** Metrics from miscellaneous shared infrastructure (slobrok, logd, RPC/TLS transport). */
    private static Set<Metric> getOtherMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        addMetrics(metrics,
                   "slobrok.heartbeats.failed.count",
                   "logd.processed.lines.count",
                   // TLS connection health for the jrt RPC transport
                   "jrt.transport.tls-certificate-verification-failures",
                   "jrt.transport.peer-authorization-failures",
                   "jrt.transport.server.tls-connections-established",
                   "jrt.transport.client.tls-connections-established",
                   "jrt.transport.server.unencrypted-connections-established",
                   "jrt.transport.client.unencrypted-connections-established",
                   // TLS connection health for the content-node network layer
                   "vds.server.network.tls-handshakes-failed",
                   "vds.server.network.peer-authorization-failures",
                   "vds.server.network.client.tls-connections-established",
                   "vds.server.network.server.tls-connections-established",
                   "vds.server.network.client.insecure-connections-established",
                   "vds.server.network.server.insecure-connections-established",
                   "vds.server.network.tls-connections-broken",
                   "vds.server.network.failed-tls-config-reloads",
                   "vds.server.fnet.num-connections");
        return metrics;
    }

    /** Metrics emitted by the config server, including its ZooKeeper back end. */
    private static Set<Metric> getConfigServerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        addMetrics(metrics,
                   "configserver.requests.count",
                   "configserver.failedRequests.count",
                   "configserver.latency.max",
                   "configserver.latency.sum",
                   "configserver.latency.count",
                   "configserver.latency.average",
                   "configserver.cacheConfigElems.last",
                   "configserver.cacheChecksumElems.last",
                   "configserver.hosts.last",
                   "configserver.delayedResponses.count",
                   "configserver.sessionChangeErrors.count",
                   "configserver.zkZNodes.last",
                   "configserver.zkAvgLatency.last",
                   "configserver.zkMaxLatency.last",
                   "configserver.zkConnections.last",
                   "configserver.zkOutstandingRequests.last");
        return metrics;
    }

    /** Metrics common to all jdisc containers: request handling, thread pools, JVM, HTTP status codes. */
    private static Set<Metric> getContainerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        addMetrics(metrics,
                   "handled.requests.count",
                   "handled.latency.max",
                   "handled.latency.sum",
                   "handled.latency.count",
                   "handled.latency.average",
                   "serverRejectedRequests.rate",
                   "serverRejectedRequests.count",
                   "serverThreadPoolSize.average",
                   "serverThreadPoolSize.min",
                   "serverThreadPoolSize.max",
                   "serverThreadPoolSize.rate",
                   "serverThreadPoolSize.count",
                   "serverThreadPoolSize.last",
                   "serverActiveThreads.average",
                   "serverActiveThreads.min",
                   "serverActiveThreads.max",
                   "serverActiveThreads.rate",
                   "serverActiveThreads.sum",
                   "serverActiveThreads.count",
                   "serverActiveThreads.last",
                   "httpapi_latency.max",
                   "httpapi_latency.sum",
                   "httpapi_latency.count",
                   "httpapi_latency.average",
                   "httpapi_pending.max",
                   "httpapi_pending.sum",
                   "httpapi_pending.count",
                   "httpapi_pending.average",
                   "httpapi_num_operations.rate",
                   "httpapi_num_updates.rate",
                   "httpapi_num_removes.rate",
                   "httpapi_num_puts.rate",
                   "httpapi_succeeded.rate",
                   "httpapi_failed.rate",
                   "httpapi_parse_error.rate",
                   // JVM heap and GC
                   "mem.heap.total.average",
                   "mem.heap.free.average",
                   "mem.heap.used.average",
                   "mem.heap.used.max",
                   "jdisc.memory_mappings.max",
                   "jdisc.open_file_descriptors.max",
                   "jdisc.gc.count.average",
                   "jdisc.gc.count.max",
                   "jdisc.gc.count.last",
                   "jdisc.gc.ms.average",
                   "jdisc.gc.ms.max",
                   "jdisc.gc.ms.last",
                   "jdisc.deactivated_containers.total.last",
                   "jdisc.deactivated_containers.with_retained_refs.last",
                   "athenz-tenant-cert.expiry.seconds.last",
                   "jdisc.http.request.prematurely_closed.rate",
                   // HTTP status code classes plus auth-relevant individual codes
                   "http.status.1xx.rate",
                   "http.status.2xx.rate",
                   "http.status.3xx.rate",
                   "http.status.4xx.rate",
                   "http.status.5xx.rate",
                   "http.status.401.rate",
                   "http.status.403.rate",
                   "jdisc.http.request.uri_length.max",
                   "jdisc.http.request.uri_length.sum",
                   "jdisc.http.request.uri_length.count",
                   "jdisc.http.request.uri_length.average",
                   "jdisc.http.request.content_size.max",
                   "jdisc.http.request.content_size.sum",
                   "jdisc.http.request.content_size.count",
                   "jdisc.http.request.content_size.average");
        return metrics;
    }

    /** Metrics emitted by the cluster controller: node state counts and state changes. */
    private static Set<Metric> getClusterControllerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        addMetrics(metrics,
                   "cluster-controller.down.count.last",
                   "cluster-controller.initializing.count.last",
                   "cluster-controller.maintenance.count.last",
                   "cluster-controller.retired.count.last",
                   "cluster-controller.stopping.count.last",
                   "cluster-controller.up.count.last",
                   "cluster-controller.cluster-state-change.count",
                   "cluster-controller.is-master.last",
                   "cluster-controller.node-event.count");
        return metrics;
    }

    /** Metrics emitted by document processing containers. */
    private static Set<Metric> getDocprocMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        addMetrics(metrics,
                   "documents_processed.rate");
        return metrics;
    }

    /** Metrics emitted by the query-serving (qrserver) container: query latency, hits, relevance, errors. */
    private static Set<Metric> getQrserverMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        addMetrics(metrics,
                   "peak_qps.max",
                   "search_connections.max",
                   "search_connections.sum",
                   "search_connections.count",
                   "search_connections.average",
                   "active_queries.max",
                   "active_queries.sum",
                   "active_queries.count",
                   "active_queries.average",
                   "feed.latency.max",
                   "feed.latency.sum",
                   "feed.latency.count",
                   "feed.latency.average",
                   "feed.http-requests.count",
                   "feed.http-requests.rate",
                   "queries.rate",
                   "query_container_latency.max",
                   "query_container_latency.sum",
                   "query_container_latency.count",
                   "query_container_latency.average",
                   "query_latency.max",
                   "query_latency.sum",
                   "query_latency.count",
                   "query_latency.average",
                   "query_latency.95percentile",
                   "query_latency.99percentile",
                   "failed_queries.rate",
                   "degraded_queries.rate",
                   "hits_per_query.max",
                   "hits_per_query.sum",
                   "hits_per_query.count",
                   "hits_per_query.average",
                   "query_hit_offset.max",
                   "query_hit_offset.sum",
                   "query_hit_offset.count",
                   "documents_covered.count",
                   "documents_total.count",
                   "dispatch_internal.rate",
                   "dispatch_fdispatch.rate",
                   "totalhits_per_query.max",
                   "totalhits_per_query.sum",
                   "totalhits_per_query.count",
                   "totalhits_per_query.average",
                   "empty_results.rate",
                   "requestsOverQuota.rate",
                   "requestsOverQuota.count",
                   // Relevance scores at rank cutoffs 1, 3 and 10
                   "relevance.at_1.sum",
                   "relevance.at_1.count",
                   "relevance.at_1.average",
                   "relevance.at_3.sum",
                   "relevance.at_3.count",
                   "relevance.at_3.average",
                   "relevance.at_10.sum",
                   "relevance.at_10.count",
                   "relevance.at_10.average",
                   // Per-cause query error rates
                   "error.timeout.rate",
                   "error.backends_oos.rate",
                   "error.plugin_failure.rate",
                   "error.backend_communication_error.rate",
                   "error.empty_document_summaries.rate",
                   "error.invalid_query_parameter.rate",
                   "error.internal_server_error.rate",
                   "error.misconfigured_server.rate",
                   "error.invalid_query_transformation.rate",
                   "error.result_with_errors.rate",
                   "error.unspecified.rate",
                   "error.unhandled_exception.rate");
        return metrics;
    }

    /** Metrics emitted by the search node (proton): document counts, executors, jobs, matching, resource usage. */
    private static Set<Metric> getSearchNodeMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        addMetrics(metrics,
                   "content.proton.documentdb.documents.total.last",
                   "content.proton.documentdb.documents.ready.last",
                   "content.proton.documentdb.documents.active.last",
                   "content.proton.documentdb.documents.removed.last",
                   "content.proton.documentdb.index.docs_in_memory.last",
                   "content.proton.documentdb.disk_usage.last",
                   "content.proton.documentdb.memory_usage.allocated_bytes.max",
                   "content.proton.transport.query.count.rate",
                   "content.proton.docsum.docs.rate",
                   "content.proton.docsum.latency.max",
                   "content.proton.docsum.latency.sum",
                   "content.proton.docsum.latency.count",
                   "content.proton.docsum.latency.average",
                   "content.proton.transport.query.latency.max",
                   "content.proton.transport.query.latency.sum",
                   "content.proton.transport.query.latency.count",
                   "content.proton.transport.query.latency.average",
                   // Search protocol (query and docsum) latency and payload sizes
                   "content.proton.search_protocol.query.latency.max",
                   "content.proton.search_protocol.query.latency.sum",
                   "content.proton.search_protocol.query.latency.count",
                   "content.proton.search_protocol.query.request_size.max",
                   "content.proton.search_protocol.query.request_size.sum",
                   "content.proton.search_protocol.query.request_size.count",
                   "content.proton.search_protocol.query.reply_size.max",
                   "content.proton.search_protocol.query.reply_size.sum",
                   "content.proton.search_protocol.query.reply_size.count",
                   "content.proton.search_protocol.docsum.latency.max",
                   "content.proton.search_protocol.docsum.latency.sum",
                   "content.proton.search_protocol.docsum.latency.count",
                   "content.proton.search_protocol.docsum.request_size.max",
                   "content.proton.search_protocol.docsum.request_size.sum",
                   "content.proton.search_protocol.docsum.request_size.count",
                   "content.proton.search_protocol.docsum.reply_size.max",
                   "content.proton.search_protocol.docsum.reply_size.sum",
                   "content.proton.search_protocol.docsum.reply_size.count",
                   "content.proton.search_protocol.docsum.requested_documents.count",
                   // Executor queue depth / throughput per thread pool
                   "content.proton.executor.proton.maxpending.last",
                   "content.proton.executor.proton.accepted.rate",
                   "content.proton.executor.flush.maxpending.last",
                   "content.proton.executor.flush.accepted.rate",
                   "content.proton.executor.match.maxpending.last",
                   "content.proton.executor.match.accepted.rate",
                   "content.proton.executor.docsum.maxpending.last",
                   "content.proton.executor.docsum.accepted.rate",
                   "content.proton.executor.shared.maxpending.last",
                   "content.proton.executor.shared.accepted.rate",
                   "content.proton.executor.warmup.maxpending.last",
                   "content.proton.executor.warmup.accepted.rate",
                   // Background maintenance job load
                   "content.proton.documentdb.job.total.average",
                   "content.proton.documentdb.job.attribute_flush.average",
                   "content.proton.documentdb.job.memory_index_flush.average",
                   "content.proton.documentdb.job.disk_index_fusion.average",
                   "content.proton.documentdb.job.document_store_flush.average",
                   "content.proton.documentdb.job.document_store_compact.average",
                   "content.proton.documentdb.job.bucket_move.average",
                   "content.proton.documentdb.job.lid_space_compact.average",
                   "content.proton.documentdb.job.removed_documents_prune.average",
                   // Per-documentdb threading service queues
                   "content.proton.documentdb.threading_service.master.maxpending.last",
                   "content.proton.documentdb.threading_service.master.accepted.rate",
                   "content.proton.documentdb.threading_service.index.maxpending.last",
                   "content.proton.documentdb.threading_service.index.accepted.rate",
                   "content.proton.documentdb.threading_service.summary.maxpending.last",
                   "content.proton.documentdb.threading_service.summary.accepted.rate",
                   "content.proton.documentdb.threading_service.index_field_inverter.maxpending.last",
                   "content.proton.documentdb.threading_service.index_field_inverter.accepted.rate",
                   "content.proton.documentdb.threading_service.index_field_writer.maxpending.last",
                   "content.proton.documentdb.threading_service.index_field_writer.accepted.rate",
                   "content.proton.documentdb.threading_service.attribute_field_writer.maxpending.last",
                   "content.proton.documentdb.threading_service.attribute_field_writer.accepted.rate",
                   // Lid space statistics per sub-db (ready / notready / removed)
                   "content.proton.documentdb.ready.lid_space.lid_bloat_factor.average",
                   "content.proton.documentdb.notready.lid_space.lid_bloat_factor.average",
                   "content.proton.documentdb.removed.lid_space.lid_bloat_factor.average",
                   "content.proton.documentdb.ready.lid_space.lid_fragmentation_factor.average",
                   "content.proton.documentdb.notready.lid_space.lid_fragmentation_factor.average",
                   "content.proton.documentdb.removed.lid_space.lid_fragmentation_factor.average",
                   "content.proton.documentdb.ready.lid_space.lid_limit.last",
                   "content.proton.documentdb.notready.lid_space.lid_limit.last",
                   "content.proton.documentdb.removed.lid_space.lid_limit.last",
                   // Node-level resource usage (feed blocking thresholds derive from these)
                   "content.proton.resource_usage.disk.average",
                   "content.proton.resource_usage.disk_utilization.average",
                   "content.proton.resource_usage.memory.average",
                   "content.proton.resource_usage.memory_utilization.average",
                   "content.proton.resource_usage.memory_mappings.max",
                   "content.proton.resource_usage.open_file_descriptors.max",
                   "content.proton.documentdb.attribute.resource_usage.enum_store.average",
                   "content.proton.documentdb.attribute.resource_usage.multi_value.average",
                   "content.proton.documentdb.attribute.resource_usage.feeding_blocked.last",
                   "content.proton.transactionlog.entries.average",
                   "content.proton.transactionlog.disk_usage.average",
                   "content.proton.transactionlog.replay_time.last",
                   // Document store usage per sub-db
                   "content.proton.documentdb.ready.document_store.disk_usage.average",
                   "content.proton.documentdb.ready.document_store.disk_bloat.average",
                   "content.proton.documentdb.ready.document_store.max_bucket_spread.average",
                   "content.proton.documentdb.ready.document_store.memory_usage.allocated_bytes.average",
                   "content.proton.documentdb.ready.document_store.memory_usage.used_bytes.average",
                   "content.proton.documentdb.ready.document_store.memory_usage.dead_bytes.average",
                   "content.proton.documentdb.ready.document_store.memory_usage.onhold_bytes.average",
                   "content.proton.documentdb.notready.document_store.disk_usage.average",
                   "content.proton.documentdb.notready.document_store.disk_bloat.average",
                   "content.proton.documentdb.notready.document_store.max_bucket_spread.average",
                   "content.proton.documentdb.notready.document_store.memory_usage.allocated_bytes.average",
                   "content.proton.documentdb.notready.document_store.memory_usage.used_bytes.average",
                   "content.proton.documentdb.notready.document_store.memory_usage.dead_bytes.average",
                   "content.proton.documentdb.notready.document_store.memory_usage.onhold_bytes.average",
                   "content.proton.documentdb.removed.document_store.disk_usage.average",
                   "content.proton.documentdb.removed.document_store.disk_bloat.average",
                   "content.proton.documentdb.removed.document_store.max_bucket_spread.average",
                   "content.proton.documentdb.removed.document_store.memory_usage.allocated_bytes.average",
                   "content.proton.documentdb.removed.document_store.memory_usage.used_bytes.average",
                   "content.proton.documentdb.removed.document_store.memory_usage.dead_bytes.average",
                   "content.proton.documentdb.removed.document_store.memory_usage.onhold_bytes.average",
                   // Document store cache effectiveness
                   "content.proton.documentdb.ready.document_store.cache.memory_usage.average",
                   "content.proton.documentdb.ready.document_store.cache.hit_rate.average",
                   "content.proton.documentdb.ready.document_store.cache.lookups.rate",
                   "content.proton.documentdb.ready.document_store.cache.invalidations.rate",
                   "content.proton.documentdb.notready.document_store.cache.memory_usage.average",
                   "content.proton.documentdb.notready.document_store.cache.hit_rate.average",
                   "content.proton.documentdb.notready.document_store.cache.lookups.rate",
                   "content.proton.documentdb.notready.document_store.cache.invalidations.rate",
                   // Attribute and index memory usage
                   "content.proton.documentdb.ready.attribute.memory_usage.allocated_bytes.average",
                   "content.proton.documentdb.ready.attribute.memory_usage.used_bytes.average",
                   "content.proton.documentdb.ready.attribute.memory_usage.dead_bytes.average",
                   "content.proton.documentdb.ready.attribute.memory_usage.onhold_bytes.average",
                   "content.proton.documentdb.notready.attribute.memory_usage.allocated_bytes.average",
                   "content.proton.documentdb.notready.attribute.memory_usage.used_bytes.average",
                   "content.proton.documentdb.notready.attribute.memory_usage.dead_bytes.average",
                   "content.proton.documentdb.notready.attribute.memory_usage.onhold_bytes.average",
                   "content.proton.documentdb.index.memory_usage.allocated_bytes.average",
                   "content.proton.documentdb.index.memory_usage.used_bytes.average",
                   "content.proton.documentdb.index.memory_usage.dead_bytes.average",
                   "content.proton.documentdb.index.memory_usage.onhold_bytes.average",
                   // Matching statistics, overall and per rank profile
                   "content.proton.documentdb.matching.queries.rate",
                   "content.proton.documentdb.matching.soft_doomed_queries.rate",
                   "content.proton.documentdb.matching.query_latency.max",
                   "content.proton.documentdb.matching.query_latency.sum",
                   "content.proton.documentdb.matching.query_latency.count",
                   "content.proton.documentdb.matching.query_latency.average",
                   "content.proton.documentdb.matching.query_collateral_time.max",
                   "content.proton.documentdb.matching.query_collateral_time.sum",
                   "content.proton.documentdb.matching.query_collateral_time.count",
                   "content.proton.documentdb.matching.query_collateral_time.average",
                   "content.proton.documentdb.matching.query_setup_time.max",
                   "content.proton.documentdb.matching.query_setup_time.sum",
                   "content.proton.documentdb.matching.query_setup_time.count",
                   "content.proton.documentdb.matching.docs_matched.rate",
                   "content.proton.documentdb.matching.rank_profile.queries.rate",
                   "content.proton.documentdb.matching.rank_profile.soft_doomed_queries.rate",
                   "content.proton.documentdb.matching.rank_profile.soft_doom_factor.min",
                   "content.proton.documentdb.matching.rank_profile.soft_doom_factor.max",
                   "content.proton.documentdb.matching.rank_profile.soft_doom_factor.sum",
                   "content.proton.documentdb.matching.rank_profile.soft_doom_factor.count",
                   "content.proton.documentdb.matching.rank_profile.query_latency.max",
                   "content.proton.documentdb.matching.rank_profile.query_latency.sum",
                   "content.proton.documentdb.matching.rank_profile.query_latency.count",
                   "content.proton.documentdb.matching.rank_profile.query_latency.average",
                   "content.proton.documentdb.matching.rank_profile.query_collateral_time.max",
                   "content.proton.documentdb.matching.rank_profile.query_collateral_time.sum",
                   "content.proton.documentdb.matching.rank_profile.query_collateral_time.count",
                   "content.proton.documentdb.matching.rank_profile.query_collateral_time.average",
                   "content.proton.documentdb.matching.rank_profile.query_setup_time.max",
                   "content.proton.documentdb.matching.rank_profile.query_setup_time.sum",
                   "content.proton.documentdb.matching.rank_profile.query_setup_time.count",
                   "content.proton.documentdb.matching.rank_profile.rerank_time.max",
                   "content.proton.documentdb.matching.rank_profile.rerank_time.sum",
                   "content.proton.documentdb.matching.rank_profile.rerank_time.count",
                   "content.proton.documentdb.matching.rank_profile.rerank_time.average",
                   "content.proton.documentdb.matching.rank_profile.docs_matched.rate",
                   "content.proton.documentdb.matching.rank_profile.limited_queries.rate");
        return metrics;
    }
}
/**
 * Declares the metrics consumed for the "vespa" metric consumer: the union of the
 * default Vespa metric set and the per-service metric names listed below.
 *
 * <p>Each getter returns the exact metric names for one service type; the names are
 * contractual identifiers emitted by the services, so they must not be altered.
 *
 * <p>NOTE(review): {@code getStorageMetrics()} is referenced from {@link #getVespaMetrics()}
 * but is not defined in this chunk — presumably defined elsewhere in this file; verify.
 */
class VespaMetricSet {

    // Composite set: "vespa" extends the default Vespa metric set with everything below.
    public static final MetricSet vespaMetricSet = new MetricSet("vespa",
                                                                 getVespaMetrics(),
                                                                 singleton(defaultVespaMetricSet));

    /** Collects the metrics of every service type into one unmodifiable, insertion-ordered set. */
    private static Set<Metric> getVespaMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.addAll(getSearchNodeMetrics());
        metrics.addAll(getStorageMetrics());
        metrics.addAll(getDocprocMetrics());
        metrics.addAll(getClusterControllerMetrics());
        metrics.addAll(getQrserverMetrics());
        metrics.addAll(getContainerMetrics());
        metrics.addAll(getConfigServerMetrics());
        metrics.addAll(getSentinelMetrics());
        metrics.addAll(getOtherMetrics());
        return Collections.unmodifiableSet(metrics);
    }

    /** Metrics from the config sentinel (service supervisor). */
    private static Set<Metric> getSentinelMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("sentinel.restarts.count"));
        metrics.add(new Metric("sentinel.totalRestarts.last"));
        metrics.add(new Metric("sentinel.uptime.last"));
        metrics.add(new Metric("sentinel.running.count"));
        metrics.add(new Metric("sentinel.running.last"));
        return metrics;
    }

    /** Metrics from miscellaneous infrastructure: slobrok, logd, and TLS/transport counters. */
    private static Set<Metric> getOtherMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("slobrok.heartbeats.failed.count"));
        metrics.add(new Metric("logd.processed.lines.count"));

        // Java (jrt) TLS metrics.
        metrics.add(new Metric("jrt.transport.tls-certificate-verification-failures"));
        metrics.add(new Metric("jrt.transport.peer-authorization-failures"));
        metrics.add(new Metric("jrt.transport.server.tls-connections-established"));
        metrics.add(new Metric("jrt.transport.client.tls-connections-established"));
        metrics.add(new Metric("jrt.transport.server.unencrypted-connections-established"));
        metrics.add(new Metric("jrt.transport.client.unencrypted-connections-established"));

        // C++ (vds) TLS metrics.
        metrics.add(new Metric("vds.server.network.tls-handshakes-failed"));
        metrics.add(new Metric("vds.server.network.peer-authorization-failures"));
        metrics.add(new Metric("vds.server.network.client.tls-connections-established"));
        metrics.add(new Metric("vds.server.network.server.tls-connections-established"));
        metrics.add(new Metric("vds.server.network.client.insecure-connections-established"));
        metrics.add(new Metric("vds.server.network.server.insecure-connections-established"));
        metrics.add(new Metric("vds.server.network.tls-connections-broken"));
        metrics.add(new Metric("vds.server.network.failed-tls-config-reloads"));
        metrics.add(new Metric("vds.server.fnet.num-connections"));
        return metrics;
    }

    /** Metrics from the config server: request handling, config cache, and ZooKeeper state. */
    private static Set<Metric> getConfigServerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("configserver.requests.count"));
        metrics.add(new Metric("configserver.failedRequests.count"));
        metrics.add(new Metric("configserver.latency.max"));
        metrics.add(new Metric("configserver.latency.sum"));
        metrics.add(new Metric("configserver.latency.count"));
        metrics.add(new Metric("configserver.latency.average"));
        metrics.add(new Metric("configserver.cacheConfigElems.last"));
        metrics.add(new Metric("configserver.cacheChecksumElems.last"));
        metrics.add(new Metric("configserver.hosts.last"));
        metrics.add(new Metric("configserver.delayedResponses.count"));
        metrics.add(new Metric("configserver.sessionChangeErrors.count"));

        metrics.add(new Metric("configserver.zkZNodes.last"));
        metrics.add(new Metric("configserver.zkAvgLatency.last"));
        metrics.add(new Metric("configserver.zkMaxLatency.last"));
        metrics.add(new Metric("configserver.zkConnections.last"));
        metrics.add(new Metric("configserver.zkOutstandingRequests.last"));
        return metrics;
    }

    /** Metrics from generic jdisc containers: request handling, thread pools, JVM memory/GC, HTTP status codes. */
    private static Set<Metric> getContainerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("handled.requests.count"));
        metrics.add(new Metric("handled.latency.max"));
        metrics.add(new Metric("handled.latency.sum"));
        metrics.add(new Metric("handled.latency.count"));
        metrics.add(new Metric("handled.latency.average"));

        metrics.add(new Metric("serverRejectedRequests.rate"));
        metrics.add(new Metric("serverRejectedRequests.count"));

        // NOTE(review): serverThreadPoolSize has no .sum while serverActiveThreads does — confirm intentional.
        metrics.add(new Metric("serverThreadPoolSize.average"));
        metrics.add(new Metric("serverThreadPoolSize.min"));
        metrics.add(new Metric("serverThreadPoolSize.max"));
        metrics.add(new Metric("serverThreadPoolSize.rate"));
        metrics.add(new Metric("serverThreadPoolSize.count"));
        metrics.add(new Metric("serverThreadPoolSize.last"));

        metrics.add(new Metric("serverActiveThreads.average"));
        metrics.add(new Metric("serverActiveThreads.min"));
        metrics.add(new Metric("serverActiveThreads.max"));
        metrics.add(new Metric("serverActiveThreads.rate"));
        metrics.add(new Metric("serverActiveThreads.sum"));
        metrics.add(new Metric("serverActiveThreads.count"));
        metrics.add(new Metric("serverActiveThreads.last"));

        // HTTP document API.
        metrics.add(new Metric("httpapi_latency.max"));
        metrics.add(new Metric("httpapi_latency.sum"));
        metrics.add(new Metric("httpapi_latency.count"));
        metrics.add(new Metric("httpapi_latency.average"));
        metrics.add(new Metric("httpapi_pending.max"));
        metrics.add(new Metric("httpapi_pending.sum"));
        metrics.add(new Metric("httpapi_pending.count"));
        metrics.add(new Metric("httpapi_pending.average"));
        metrics.add(new Metric("httpapi_num_operations.rate"));
        metrics.add(new Metric("httpapi_num_updates.rate"));
        metrics.add(new Metric("httpapi_num_removes.rate"));
        metrics.add(new Metric("httpapi_num_puts.rate"));
        metrics.add(new Metric("httpapi_succeeded.rate"));
        metrics.add(new Metric("httpapi_failed.rate"));
        metrics.add(new Metric("httpapi_parse_error.rate"));

        // JVM heap and process resources.
        metrics.add(new Metric("mem.heap.total.average"));
        metrics.add(new Metric("mem.heap.free.average"));
        metrics.add(new Metric("mem.heap.used.average"));
        metrics.add(new Metric("mem.heap.used.max"));
        metrics.add(new Metric("jdisc.memory_mappings.max"));
        metrics.add(new Metric("jdisc.open_file_descriptors.max"));

        // Garbage collection.
        metrics.add(new Metric("jdisc.gc.count.average"));
        metrics.add(new Metric("jdisc.gc.count.max"));
        metrics.add(new Metric("jdisc.gc.count.last"));
        metrics.add(new Metric("jdisc.gc.ms.average"));
        metrics.add(new Metric("jdisc.gc.ms.max"));
        metrics.add(new Metric("jdisc.gc.ms.last"));

        metrics.add(new Metric("jdisc.deactivated_containers.total.last"));
        metrics.add(new Metric("jdisc.deactivated_containers.with_retained_refs.last"));
        metrics.add(new Metric("athenz-tenant-cert.expiry.seconds.last"));
        metrics.add(new Metric("jdisc.http.request.prematurely_closed.rate"));

        // HTTP response status classes plus two specific auth-related codes.
        metrics.add(new Metric("http.status.1xx.rate"));
        metrics.add(new Metric("http.status.2xx.rate"));
        metrics.add(new Metric("http.status.3xx.rate"));
        metrics.add(new Metric("http.status.4xx.rate"));
        metrics.add(new Metric("http.status.5xx.rate"));
        metrics.add(new Metric("http.status.401.rate"));
        metrics.add(new Metric("http.status.403.rate"));

        metrics.add(new Metric("jdisc.http.request.uri_length.max"));
        metrics.add(new Metric("jdisc.http.request.uri_length.sum"));
        metrics.add(new Metric("jdisc.http.request.uri_length.count"));
        metrics.add(new Metric("jdisc.http.request.uri_length.average"));
        metrics.add(new Metric("jdisc.http.request.content_size.max"));
        metrics.add(new Metric("jdisc.http.request.content_size.sum"));
        metrics.add(new Metric("jdisc.http.request.content_size.count"));
        metrics.add(new Metric("jdisc.http.request.content_size.average"));
        return metrics;
    }

    /** Metrics from the cluster controller: node state counts and master/event bookkeeping. */
    private static Set<Metric> getClusterControllerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("cluster-controller.down.count.last"));
        metrics.add(new Metric("cluster-controller.initializing.count.last"));
        metrics.add(new Metric("cluster-controller.maintenance.count.last"));
        metrics.add(new Metric("cluster-controller.retired.count.last"));
        metrics.add(new Metric("cluster-controller.stopping.count.last"));
        metrics.add(new Metric("cluster-controller.up.count.last"));
        metrics.add(new Metric("cluster-controller.cluster-state-change.count"));
        metrics.add(new Metric("cluster-controller.is-master.last"));
        metrics.add(new Metric("cluster-controller.node-event.count"));
        return metrics;
    }

    /** Metrics from document processing containers. */
    private static Set<Metric> getDocprocMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("documents_processed.rate"));
        return metrics;
    }

    /** Metrics from query-serving containers (qrserver): query/feed latency, hits, relevance, and errors. */
    private static Set<Metric> getQrserverMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("peak_qps.max"));
        metrics.add(new Metric("search_connections.max"));
        metrics.add(new Metric("search_connections.sum"));
        metrics.add(new Metric("search_connections.count"));
        metrics.add(new Metric("search_connections.average"));
        metrics.add(new Metric("active_queries.max"));
        metrics.add(new Metric("active_queries.sum"));
        metrics.add(new Metric("active_queries.count"));
        metrics.add(new Metric("active_queries.average"));

        metrics.add(new Metric("feed.latency.max"));
        metrics.add(new Metric("feed.latency.sum"));
        metrics.add(new Metric("feed.latency.count"));
        metrics.add(new Metric("feed.latency.average"));
        metrics.add(new Metric("feed.http-requests.count"));
        metrics.add(new Metric("feed.http-requests.rate"));

        metrics.add(new Metric("queries.rate"));
        metrics.add(new Metric("query_container_latency.max"));
        metrics.add(new Metric("query_container_latency.sum"));
        metrics.add(new Metric("query_container_latency.count"));
        metrics.add(new Metric("query_container_latency.average"));
        metrics.add(new Metric("query_latency.max"));
        metrics.add(new Metric("query_latency.sum"));
        metrics.add(new Metric("query_latency.count"));
        metrics.add(new Metric("query_latency.average"));
        metrics.add(new Metric("query_latency.95percentile"));
        metrics.add(new Metric("query_latency.99percentile"));
        metrics.add(new Metric("failed_queries.rate"));
        metrics.add(new Metric("degraded_queries.rate"));

        metrics.add(new Metric("hits_per_query.max"));
        metrics.add(new Metric("hits_per_query.sum"));
        metrics.add(new Metric("hits_per_query.count"));
        metrics.add(new Metric("hits_per_query.average"));
        metrics.add(new Metric("query_hit_offset.max"));
        metrics.add(new Metric("query_hit_offset.sum"));
        metrics.add(new Metric("query_hit_offset.count"));
        metrics.add(new Metric("documents_covered.count"));
        metrics.add(new Metric("documents_total.count"));
        metrics.add(new Metric("dispatch_internal.rate"));
        metrics.add(new Metric("dispatch_fdispatch.rate"));

        metrics.add(new Metric("totalhits_per_query.max"));
        metrics.add(new Metric("totalhits_per_query.sum"));
        metrics.add(new Metric("totalhits_per_query.count"));
        metrics.add(new Metric("totalhits_per_query.average"));
        metrics.add(new Metric("empty_results.rate"));
        metrics.add(new Metric("requestsOverQuota.rate"));
        metrics.add(new Metric("requestsOverQuota.count"));

        // Relevance score at hit positions 1, 3 and 10.
        metrics.add(new Metric("relevance.at_1.sum"));
        metrics.add(new Metric("relevance.at_1.count"));
        metrics.add(new Metric("relevance.at_1.average"));
        metrics.add(new Metric("relevance.at_3.sum"));
        metrics.add(new Metric("relevance.at_3.count"));
        metrics.add(new Metric("relevance.at_3.average"));
        metrics.add(new Metric("relevance.at_10.sum"));
        metrics.add(new Metric("relevance.at_10.count"));
        metrics.add(new Metric("relevance.at_10.average"));

        // Per-cause query error rates.
        metrics.add(new Metric("error.timeout.rate"));
        metrics.add(new Metric("error.backends_oos.rate"));
        metrics.add(new Metric("error.plugin_failure.rate"));
        metrics.add(new Metric("error.backend_communication_error.rate"));
        metrics.add(new Metric("error.empty_document_summaries.rate"));
        metrics.add(new Metric("error.invalid_query_parameter.rate"));
        metrics.add(new Metric("error.internal_server_error.rate"));
        metrics.add(new Metric("error.misconfigured_server.rate"));
        metrics.add(new Metric("error.invalid_query_transformation.rate"));
        metrics.add(new Metric("error.result_with_errors.rate"));
        metrics.add(new Metric("error.unspecified.rate"));
        metrics.add(new Metric("error.unhandled_exception.rate"));
        return metrics;
    }

    /** Metrics from search nodes (proton): document counts, resource usage, stores, and matching. */
    private static Set<Metric> getSearchNodeMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        // Document counts and overall usage per document db.
        metrics.add(new Metric("content.proton.documentdb.documents.total.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.ready.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.active.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.removed.last"));
        metrics.add(new Metric("content.proton.documentdb.index.docs_in_memory.last"));
        metrics.add(new Metric("content.proton.documentdb.disk_usage.last"));
        metrics.add(new Metric("content.proton.documentdb.memory_usage.allocated_bytes.max"));

        // Query and docsum transport.
        metrics.add(new Metric("content.proton.transport.query.count.rate"));
        metrics.add(new Metric("content.proton.docsum.docs.rate"))
;
        metrics.add(new Metric("content.proton.docsum.latency.max"));
        metrics.add(new Metric("content.proton.docsum.latency.sum"));
        metrics.add(new Metric("content.proton.docsum.latency.count"));
        metrics.add(new Metric("content.proton.docsum.latency.average"));
        metrics.add(new Metric("content.proton.transport.query.latency.max"));
        metrics.add(new Metric("content.proton.transport.query.latency.sum"));
        metrics.add(new Metric("content.proton.transport.query.latency.count"));
        metrics.add(new Metric("content.proton.transport.query.latency.average"));

        // Search protocol (request/reply sizes and latency).
        metrics.add(new Metric("content.proton.search_protocol.query.latency.max"));
        metrics.add(new Metric("content.proton.search_protocol.query.latency.sum"));
        metrics.add(new Metric("content.proton.search_protocol.query.latency.count"));
        metrics.add(new Metric("content.proton.search_protocol.query.request_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.query.request_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.query.request_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.query.reply_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.query.reply_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.query.reply_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.latency.max"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.latency.sum"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.latency.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.requested_documents.count"));

        // Executor queues (pending work and accepted tasks).
        metrics.add(new Metric("content.proton.executor.proton.maxpending.last"));
        metrics.add(new Metric("content.proton.executor.proton.accepted.rate"));
        metrics.add(new Metric("content.proton.executor.flush.maxpending.last"));
        metrics.add(new Metric("content.proton.executor.flush.accepted.rate"));
        metrics.add(new Metric("content.proton.executor.match.maxpending.last"));
        metrics.add(new Metric("content.proton.executor.match.accepted.rate"));
        metrics.add(new Metric("content.proton.executor.docsum.maxpending.last"));
        metrics.add(new Metric("content.proton.executor.docsum.accepted.rate"));
        metrics.add(new Metric("content.proton.executor.shared.maxpending.last"));
        metrics.add(new Metric("content.proton.executor.shared.accepted.rate"));
        metrics.add(new Metric("content.proton.executor.warmup.maxpending.last"));
        metrics.add(new Metric("content.proton.executor.warmup.accepted.rate"));

        // Maintenance job load per document db.
        metrics.add(new Metric("content.proton.documentdb.job.total.average"));
        metrics.add(new Metric("content.proton.documentdb.job.attribute_flush.average"));
        metrics.add(new Metric("content.proton.documentdb.job.memory_index_flush.average"));
        metrics.add(new Metric("content.proton.documentdb.job.disk_index_fusion.average"));
        metrics.add(new Metric("content.proton.documentdb.job.document_store_flush.average"));
        metrics.add(new Metric("content.proton.documentdb.job.document_store_compact.average"));
        metrics.add(new Metric("content.proton.documentdb.job.bucket_move.average"));
        metrics.add(new Metric("content.proton.documentdb.job.lid_space_compact.average"));
        metrics.add(new Metric("content.proton.documentdb.job.removed_documents_prune.average"));

        // Per-document-db threading service queues.
        metrics.add(new Metric("content.proton.documentdb.threading_service.master.maxpending.last"));
        metrics.add(new Metric("content.proton.documentdb.threading_service.master.accepted.rate"));
        metrics.add(new Metric("content.proton.documentdb.threading_service.index.maxpending.last"));
        metrics.add(new Metric("content.proton.documentdb.threading_service.index.accepted.rate"));
        metrics.add(new Metric("content.proton.documentdb.threading_service.summary.maxpending.last"));
        metrics.add(new Metric("content.proton.documentdb.threading_service.summary.accepted.rate"));
        metrics.add(new Metric("content.proton.documentdb.threading_service.index_field_inverter.maxpending.last"));
        metrics.add(new Metric("content.proton.documentdb.threading_service.index_field_inverter.accepted.rate"));
        metrics.add(new Metric("content.proton.documentdb.threading_service.index_field_writer.maxpending.last"));
        metrics.add(new Metric("content.proton.documentdb.threading_service.index_field_writer.accepted.rate"));
        metrics.add(new Metric("content.proton.documentdb.threading_service.attribute_field_writer.maxpending.last"));
        metrics.add(new Metric("content.proton.documentdb.threading_service.attribute_field_writer.accepted.rate"));

        // Local document id (lid) space, per sub-db (ready/notready/removed).
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_bloat_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_bloat_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_bloat_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_fragmentation_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_fragmentation_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_fragmentation_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_limit.last"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_limit.last"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_limit.last"));

        // Node-level resource usage (feed blocking thresholds depend on these).
        metrics.add(new Metric("content.proton.resource_usage.disk.average"));
        metrics.add(new Metric("content.proton.resource_usage.disk_utilization.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory_utilization.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory_mappings.max"));
        metrics.add(new Metric("content.proton.resource_usage.open_file_descriptors.max"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.enum_store.average"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.multi_value.average"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.last"));

        // Transaction log.
        metrics.add(new Metric("content.proton.transactionlog.entries.average"));
        metrics.add(new Metric("content.proton.transactionlog.disk_usage.average"));
        metrics.add(new Metric("content.proton.transactionlog.replay_time.last"));

        // Document store, per sub-db.
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_bloat.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.max_bucket_spread.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_bloat.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.max_bucket_spread.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_bloat.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.max_bucket_spread.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.onhold_bytes.average"));

        // Document store cache.
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.memory_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.hit_rate.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.lookups.rate"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.invalidations.rate"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.memory_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.hit_rate.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.lookups.rate"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.invalidations.rate"));

        // Attribute memory usage, per sub-db.
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.onhold_bytes.average"));

        // Index memory usage.
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.onhold_bytes.average"));

        // Matching, aggregated over rank profiles.
        metrics.add(new Metric("content.proton.documentdb.matching.queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.soft_doomed_queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.rate"));

        // Matching, per rank profile.
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doomed_queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.min"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.limited_queries.rate"));
        return metrics;
    }
}
Argh, `Optional`s. I never managed to decide which is better: the above, or `return jobError.equals(Optional.of(DeploymentJobs.JobError.outOfCapacity));`.
/** Returns whether the last completion of this job failed because the zone was out of capacity. */
public boolean isOutOfCapacity() {
    return jobError.map(DeploymentJobs.JobError.outOfCapacity::equals).orElse(false);
}
return jobError.filter(error -> error == DeploymentJobs.JobError.outOfCapacity).isPresent();
/** Returns whether the last completion of this job failed because the zone was out of capacity. */
public boolean isOutOfCapacity() { return jobError.filter(error -> error == DeploymentJobs.JobError.outOfCapacity).isPresent(); }
/**
 * The status of a single deployment job for an application: its last triggering,
 * last completion, when it started failing, and when it last succeeded.
 * Instances are immutable; the with- methods return modified copies.
 */
class JobStatus {

    private final DeploymentJobs.JobType type;

    private final Optional<JobRun> lastTriggered;
    private final Optional<JobRun> lastCompleted;
    private final Optional<JobRun> firstFailing;
    private final Optional<JobRun> lastSuccess;

    // Error of the last completion; empty if the last completion succeeded.
    private final Optional<DeploymentJobs.JobError> jobError;

    /**
     * Used by the persistence layer (only) to create a complete JobStatus instance.
     * Other creation should be by using initial- and with- methods.
     */
    public JobStatus(DeploymentJobs.JobType type, Optional<DeploymentJobs.JobError> jobError,
                     Optional<JobRun> lastTriggered, Optional<JobRun> lastCompleted,
                     Optional<JobRun> firstFailing, Optional<JobRun> lastSuccess) {
        requireNonNull(type, "jobType cannot be null");
        requireNonNull(jobError, "jobError cannot be null");
        requireNonNull(lastTriggered, "lastTriggered cannot be null");
        requireNonNull(lastCompleted, "lastCompleted cannot be null");
        requireNonNull(firstFailing, "firstFailing cannot be null");
        requireNonNull(lastSuccess, "lastSuccess cannot be null");

        this.type = type;
        this.jobError = jobError;
        // The component job's triggering is deliberately not tracked here.
        this.lastTriggered = type == DeploymentJobs.JobType.component ? Optional.empty() : lastTriggered;
        this.lastCompleted = lastCompleted;
        this.firstFailing = firstFailing;
        this.lastSuccess = lastSuccess;
    }

    /** Returns an empty job status */
    public static JobStatus initial(DeploymentJobs.JobType type) {
        return new JobStatus(type, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty());
    }

    /** Returns a copy of this with a new triggering built from the given deployment details. */
    public JobStatus withTriggering(Version platform, ApplicationVersion application, Optional<Deployment> deployment,
                                    String reason, Instant triggeredAt) {
        return withTriggering(JobRun.triggering(platform, application, deployment.map(Deployment::version),
                                                deployment.map(Deployment::applicationVersion), reason, triggeredAt));
    }

    /** Returns a copy of this with the given run recorded as the last triggering. */
    public JobStatus withTriggering(JobRun jobRun) {
        return new JobStatus(type, jobError, Optional.of(jobRun), lastCompleted, firstFailing, lastSuccess);
    }

    /**
     * Returns a copy of this with a completion derived from the last triggering.
     * NOTE(review): calls lastTriggered.get() unchecked — assumes a triggering has been
    *  recorded before completion; verify callers guarantee this.
     */
    public JobStatus withCompletion(long runId, Optional<DeploymentJobs.JobError> jobError, Instant completion) {
        return withCompletion(lastTriggered.get().completion(runId, completion), jobError);
    }

    /**
     * Returns a copy of this with the given completion recorded: on failure, firstFailing is set
     * if not already failing; on success, lastSuccess is set and firstFailing cleared.
     */
    public JobStatus withCompletion(JobRun completion, Optional<DeploymentJobs.JobError> jobError) {
        Optional<JobRun> firstFailing = this.firstFailing;
        if (jobError.isPresent() && ! this.firstFailing.isPresent())
            firstFailing = Optional.of(completion);

        Optional<JobRun> lastSuccess = this.lastSuccess;
        if ( ! jobError.isPresent()) {
            lastSuccess = Optional.of(completion);
            firstFailing = Optional.empty();
        }

        return new JobStatus(type, jobError, lastTriggered, Optional.of(completion), firstFailing, lastSuccess);
    }

    /** Returns the type of this job. */
    public DeploymentJobs.JobType type() { return type; }

    /** Returns true unless this job last completed with a failure */
    public boolean isSuccess() {
        return lastCompleted().isPresent() && ! jobError.isPresent();
    }

    /** The error of the last completion, or empty if the last run succeeded */
    public Optional<DeploymentJobs.JobError> jobError() { return jobError; }

    /**
     * Returns the last triggering of this job, or empty if the controller has never triggered it
     * and not seen a deployment for it
     */
    public Optional<JobRun> lastTriggered() { return lastTriggered; }

    /** Returns the last completion of this job (whether failing or succeeding), or empty if it never completed */
    public Optional<JobRun> lastCompleted() { return lastCompleted; }

    /** Returns the run when this started failing, or empty if it is not currently failing */
    public Optional<JobRun> firstFailing() { return firstFailing; }

    /** Returns the run when this last succeeded, or empty if it has never succeeded */
    public Optional<JobRun> lastSuccess() { return lastSuccess; }

    @Override
    public String toString() {
        return "job status of " + type + "[ " +
               "last triggered: " + lastTriggered.map(JobRun::toString).orElse("(never)") +
               ", last completed: " + lastCompleted.map(JobRun::toString).orElse("(never)") +
               ", first failing: " + firstFailing.map(JobRun::toString).orElse("(not failing)") +
               ", lastSuccess: " + lastSuccess.map(JobRun::toString).orElse("(never)") + "]";
    }

    @Override
    public int hashCode() { return Objects.hash(type, jobError, lastTriggered, lastCompleted, firstFailing, lastSuccess); }

    @Override
    public boolean equals(Object o) {
        if (o == this) return true;
        if ( ! ( o instanceof JobStatus)) return false;
        JobStatus other = (JobStatus)o;
        return Objects.equals(type, other.type) &&
               Objects.equals(jobError, other.jobError) &&
               Objects.equals(lastTriggered, other.lastTriggered) &&
               Objects.equals(lastCompleted, other.lastCompleted) &&
               Objects.equals(firstFailing, other.firstFailing) &&
               Objects.equals(lastSuccess, other.lastSuccess);
    }

    /** Information about a particular triggering or completion of a run of a job. This is immutable. */
    public static class JobRun {

        private final long id;
        private final Version platform;
        private final ApplicationVersion application;
        private final Optional<Version> sourcePlatform;
        private final Optional<ApplicationVersion> sourceApplication;
        private final String reason;
        private final Instant at;

        // NOTE(review): sourcePlatform and sourceApplication are not null-checked here,
        // unlike the other reference arguments — confirm whether that is intentional.
        public JobRun(long id, Version platform, ApplicationVersion application,
                      Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication,
                      String reason, Instant at) {
            this.id = id;
            this.platform = requireNonNull(platform);
            this.application = requireNonNull(application);
            this.sourcePlatform = sourcePlatform;
            this.sourceApplication = sourceApplication;
            this.reason = requireNonNull(reason);
            this.at = requireNonNull(at);
        }

        /** Creates a run representing a triggering; the run id is not yet known and is set to -1. */
        public static JobRun triggering(Version platform, ApplicationVersion application,
                                        Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication,
                                        String reason, Instant at) {
            return new JobRun(-1, platform, application, sourcePlatform, sourceApplication, reason, at);
        }

        /** Returns a copy of this run with the given id and completion time. */
        public JobRun completion(long id, Instant at) {
            return new JobRun(id, platform, application, sourcePlatform, sourceApplication, reason, at);
        }

        /** Returns the id of this run of this job, or -1 if not known */
        public long id() { return id; }

        /** Returns the Vespa version used on this run */
        public Version platform() { return platform; }

        /** Returns the Vespa version this run upgraded from, if already deployed */
        public Optional<Version> sourcePlatform() { return sourcePlatform; }

        /** Returns the application version used in this run */
        public ApplicationVersion application() { return application; }

        /** Returns the application version this run upgraded from, if already deployed */
        public Optional<ApplicationVersion> sourceApplication() { return sourceApplication; }

        /** Returns a human-readable reason for this particular job run */
        public String reason() { return reason; }

        /** Returns the time of this triggering or completion */
        public Instant at() { return at; }

        @Override
        public String toString() {
            return "job run " + id + " of version " + platform +
                   (sourcePlatform.map(version -> " (" + version + ")").orElse("")) + " " +
                   application.id() +
                   (sourceApplication.map(version -> " (" + version.id() + ")").orElse("")) +
                   " at " + at;
        }

        // NOTE: reason is deliberately excluded from equals and hashCode, consistently in both.
        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (!(o instanceof JobRun)) return false;

            JobRun run = (JobRun) o;

            if (id != run.id) return false;
            if (!platform.equals(run.platform)) return false;
            if (!application.equals(run.application)) return false;
            if (!sourcePlatform.equals(run.sourcePlatform)) return false;
            if (!sourceApplication.equals(run.sourceApplication)) return false;
            return at.equals(run.at);
        }

        @Override
        public int hashCode() {
            int result = (int) (id ^ (id >>> 32));
            result = 31 * result + platform.hashCode();
            result = 31 * result + application.hashCode();
            result = 31 * result + sourcePlatform.hashCode();
            result = 31 * result + sourceApplication.hashCode();
            result = 31 * result + at.hashCode();
            return result;
        }

    }

}
class JobStatus { private final DeploymentJobs.JobType type; private final Optional<JobRun> lastTriggered; private final Optional<JobRun> lastCompleted; private final Optional<JobRun> firstFailing; private final Optional<JobRun> lastSuccess; private final Optional<DeploymentJobs.JobError> jobError; /** * Used by the persistence layer (only) to create a complete JobStatus instance. * Other creation should be by using initial- and with- methods. */ public JobStatus(DeploymentJobs.JobType type, Optional<DeploymentJobs.JobError> jobError, Optional<JobRun> lastTriggered, Optional<JobRun> lastCompleted, Optional<JobRun> firstFailing, Optional<JobRun> lastSuccess) { requireNonNull(type, "jobType cannot be null"); requireNonNull(jobError, "jobError cannot be null"); requireNonNull(lastTriggered, "lastTriggered cannot be null"); requireNonNull(lastCompleted, "lastCompleted cannot be null"); requireNonNull(firstFailing, "firstFailing cannot be null"); requireNonNull(lastSuccess, "lastSuccess cannot be null"); this.type = type; this.jobError = jobError; this.lastTriggered = type == DeploymentJobs.JobType.component ? 
Optional.empty() : lastTriggered; this.lastCompleted = lastCompleted; this.firstFailing = firstFailing; this.lastSuccess = lastSuccess; } /** Returns an empty job status */ public static JobStatus initial(DeploymentJobs.JobType type) { return new JobStatus(type, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty()); } public JobStatus withTriggering(Version platform, ApplicationVersion application, Optional<Deployment> deployment, String reason, Instant triggeredAt) { return withTriggering(JobRun.triggering(platform, application, deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion), reason, triggeredAt)); } public JobStatus withTriggering(JobRun jobRun) { return new JobStatus(type, jobError, Optional.of(jobRun), lastCompleted, firstFailing, lastSuccess); } public JobStatus withCompletion(long runId, Optional<DeploymentJobs.JobError> jobError, Instant completion) { return withCompletion(lastTriggered.get().completion(runId, completion), jobError); } public JobStatus withCompletion(JobRun completion, Optional<DeploymentJobs.JobError> jobError) { Optional<JobRun> firstFailing = this.firstFailing; if (jobError.isPresent() && ! this.firstFailing.isPresent()) firstFailing = Optional.of(completion); Optional<JobRun> lastSuccess = this.lastSuccess; if ( ! jobError.isPresent()) { lastSuccess = Optional.of(completion); firstFailing = Optional.empty(); } return new JobStatus(type, jobError, lastTriggered, Optional.of(completion), firstFailing, lastSuccess); } public DeploymentJobs.JobType type() { return type; } /** Returns true unless this job last completed with a failure */ public boolean isSuccess() { return lastCompleted().isPresent() && ! 
jobError.isPresent(); } /** The error of the last completion, or empty if the last run succeeded */ public Optional<DeploymentJobs.JobError> jobError() { return jobError; } /** Returns whether this last failed on out of capacity */ /** * Returns the last triggering of this job, or empty if the controller has never triggered it * and not seen a deployment for it */ public Optional<JobRun> lastTriggered() { return lastTriggered; } /** Returns the last completion of this job (whether failing or succeeding), or empty if it never completed */ public Optional<JobRun> lastCompleted() { return lastCompleted; } /** Returns the run when this started failing, or empty if it is not currently failing */ public Optional<JobRun> firstFailing() { return firstFailing; } /** Returns the run when this last succeeded, or empty if it has never succeeded */ public Optional<JobRun> lastSuccess() { return lastSuccess; } @Override public String toString() { return "job status of " + type + "[ " + "last triggered: " + lastTriggered.map(JobRun::toString).orElse("(never)") + ", last completed: " + lastCompleted.map(JobRun::toString).orElse("(never)") + ", first failing: " + firstFailing.map(JobRun::toString).orElse("(not failing)") + ", lastSuccess: " + lastSuccess.map(JobRun::toString).orElse("(never)") + "]"; } @Override public int hashCode() { return Objects.hash(type, jobError, lastTriggered, lastCompleted, firstFailing, lastSuccess); } @Override public boolean equals(Object o) { if (o == this) return true; if ( ! ( o instanceof JobStatus)) return false; JobStatus other = (JobStatus)o; return Objects.equals(type, other.type) && Objects.equals(jobError, other.jobError) && Objects.equals(lastTriggered, other.lastTriggered) && Objects.equals(lastCompleted, other.lastCompleted) && Objects.equals(firstFailing, other.firstFailing) && Objects.equals(lastSuccess, other.lastSuccess); } /** Information about a particular triggering or completion of a run of a job. This is immutable. 
*/ public static class JobRun { private final long id; private final Version platform; private final ApplicationVersion application; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private final String reason; private final Instant at; public JobRun(long id, Version platform, ApplicationVersion application, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication, String reason, Instant at) { this.id = id; this.platform = requireNonNull(platform); this.application = requireNonNull(application); this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; this.reason = requireNonNull(reason); this.at = requireNonNull(at); } public static JobRun triggering(Version platform, ApplicationVersion application, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication, String reason, Instant at) { return new JobRun(-1, platform, application, sourcePlatform, sourceApplication, reason, at); } public JobRun completion(long id, Instant at) { return new JobRun(id, platform, application, sourcePlatform, sourceApplication, reason, at); } /** Returns the id of this run of this job, or -1 if not known */ public long id() { return id; } /** Returns the Vespa version used on this run */ public Version platform() { return platform; } /** Returns the Vespa version this run upgraded from, if already deployed */ public Optional<Version> sourcePlatform() { return sourcePlatform; } /** Returns the application version used in this run */ public ApplicationVersion application() { return application; } /** Returns the application version this run upgraded from, if already deployed */ public Optional<ApplicationVersion> sourceApplication() { return sourceApplication; } /** Returns a human-readable reason for this particular job run */ public String reason() { return reason; } /** Returns the time if this triggering or completion */ public Instant at() { return at; } 
@Override public String toString() { return "job run " + id + " of version " + platform + (sourcePlatform.map(version -> " (" + version + ")").orElse("")) + " " + application.id() + (sourceApplication.map(version -> " (" + version.id() + ")").orElse("")) + " at " + at; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof JobRun)) return false; JobRun run = (JobRun) o; if (id != run.id) return false; if (!platform.equals(run.platform)) return false; if (!application.equals(run.application)) return false; if (!sourcePlatform.equals(run.sourcePlatform)) return false; if (!sourceApplication.equals(run.sourceApplication)) return false; return at.equals(run.at); } @Override public int hashCode() { int result = (int) (id ^ (id >>> 32)); result = 31 * result + platform.hashCode(); result = 31 * result + application.hashCode(); result = 31 * result + sourcePlatform.hashCode(); result = 31 * result + sourceApplication.hashCode(); result = 31 * result + at.hashCode(); return result; } } }
The job error will probably be replaced with a job status soon, at which point this will no longer be an issue.
/** Returns whether the last completion of this job failed because the zone was out of capacity. */
public boolean isOutOfCapacity() {
    return jobError.map(error -> error == DeploymentJobs.JobError.outOfCapacity)
                   .orElse(false);
}
return jobError.filter(error -> error == DeploymentJobs.JobError.outOfCapacity).isPresent();
/** Returns whether the last completion of this job failed with an out-of-capacity error. */
public boolean isOutOfCapacity() {
    // Enum constants are singletons, so reference comparison against the unwrapped value suffices.
    return jobError.orElse(null) == DeploymentJobs.JobError.outOfCapacity;
}
// Immutable status of one deployment job of an application: its last triggering,
// last completion, the run on which it started failing, and its last success.
class JobStatus {

    private final DeploymentJobs.JobType type;
    private final Optional<JobRun> lastTriggered;
    private final Optional<JobRun> lastCompleted;
    private final Optional<JobRun> firstFailing;
    private final Optional<JobRun> lastSuccess;
    private final Optional<DeploymentJobs.JobError> jobError;

    /**
     * Used by the persistence layer (only) to create a complete JobStatus instance.
     * Other creation should be by using initial- and with- methods.
     */
    public JobStatus(DeploymentJobs.JobType type, Optional<DeploymentJobs.JobError> jobError,
                     Optional<JobRun> lastTriggered, Optional<JobRun> lastCompleted,
                     Optional<JobRun> firstFailing, Optional<JobRun> lastSuccess) {
        requireNonNull(type, "jobType cannot be null");
        requireNonNull(jobError, "jobError cannot be null");
        requireNonNull(lastTriggered, "lastTriggered cannot be null");
        requireNonNull(lastCompleted, "lastCompleted cannot be null");
        requireNonNull(firstFailing, "firstFailing cannot be null");
        requireNonNull(lastSuccess, "lastSuccess cannot be null");
        this.type = type;
        this.jobError = jobError;
        // The component job never stores a triggering — presumably it is triggered
        // outside the controller; confirm before relying on this.
        this.lastTriggered = type == DeploymentJobs.JobType.component ? Optional.empty() : lastTriggered;
        this.lastCompleted = lastCompleted;
        this.firstFailing = firstFailing;
        this.lastSuccess = lastSuccess;
    }

    /** Returns an empty job status */
    public static JobStatus initial(DeploymentJobs.JobType type) {
        return new JobStatus(type, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty());
    }

    // Returns a copy of this with a new triggering built from the given change data.
    public JobStatus withTriggering(Version platform, ApplicationVersion application, Optional<Deployment> deployment, String reason, Instant triggeredAt) {
        return withTriggering(JobRun.triggering(platform, application,
                                                deployment.map(Deployment::version),
                                                deployment.map(Deployment::applicationVersion),
                                                reason, triggeredAt));
    }

    // Returns a copy of this with the given run as the last triggering.
    public JobStatus withTriggering(JobRun jobRun) {
        return new JobStatus(type, jobError, Optional.of(jobRun), lastCompleted, firstFailing, lastSuccess);
    }

    // NOTE(review): unchecked Optional.get() — throws NoSuchElementException if this
    // job was never triggered; callers apparently guarantee a prior triggering.
    public JobStatus withCompletion(long runId, Optional<DeploymentJobs.JobError> jobError, Instant completion) {
        return withCompletion(lastTriggered.get().completion(runId, completion), jobError);
    }

    // Returns a copy of this with the given run as the last completion.
    public JobStatus withCompletion(JobRun completion, Optional<DeploymentJobs.JobError> jobError) {
        // firstFailing marks the start of the current failure streak: set on the
        // first failing completion, cleared again on any success.
        Optional<JobRun> firstFailing = this.firstFailing;
        if (jobError.isPresent() && ! this.firstFailing.isPresent())
            firstFailing = Optional.of(completion);
        Optional<JobRun> lastSuccess = this.lastSuccess;
        if ( ! jobError.isPresent()) {
            lastSuccess = Optional.of(completion);
            firstFailing = Optional.empty(); // a success ends the failure streak
        }
        return new JobStatus(type, jobError, lastTriggered, Optional.of(completion), firstFailing, lastSuccess);
    }

    public DeploymentJobs.JobType type() { return type; }

    /** Returns true unless this job last completed with a failure */
    public boolean isSuccess() { return lastCompleted().isPresent() && ! jobError.isPresent(); }

    /** The error of the last completion, or empty if the last run succeeded */
    public Optional<DeploymentJobs.JobError> jobError() { return jobError; }

    /** Returns whether this last failed on out of capacity */
    // NOTE(review): the javadoc above has no method under it; isOutOfCapacity()
    // appears to exist elsewhere in the file — reattach or remove this javadoc.

    /**
     * Returns the last triggering of this job, or empty if the controller has never triggered it
     * and not seen a deployment for it
     */
    public Optional<JobRun> lastTriggered() { return lastTriggered; }

    /** Returns the last completion of this job (whether failing or succeeding), or empty if it never completed */
    public Optional<JobRun> lastCompleted() { return lastCompleted; }

    /** Returns the run when this started failing, or empty if it is not currently failing */
    public Optional<JobRun> firstFailing() { return firstFailing; }

    /** Returns the run when this last succeeded, or empty if it has never succeeded */
    public Optional<JobRun> lastSuccess() { return lastSuccess; }

    @Override
    public String toString() {
        return "job status of " + type + "[ " +
               "last triggered: " + lastTriggered.map(JobRun::toString).orElse("(never)") +
               ", last completed: " + lastCompleted.map(JobRun::toString).orElse("(never)") +
               ", first failing: " + firstFailing.map(JobRun::toString).orElse("(not failing)") +
               ", lastSuccess: " + lastSuccess.map(JobRun::toString).orElse("(never)") + "]";
    }

    @Override
    public int hashCode() {
        return Objects.hash(type, jobError, lastTriggered, lastCompleted, firstFailing, lastSuccess);
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) return true;
        if ( ! ( o instanceof JobStatus)) return false;
        JobStatus other = (JobStatus)o;
        return Objects.equals(type, other.type)
               && Objects.equals(jobError, other.jobError)
               && Objects.equals(lastTriggered, other.lastTriggered)
               && Objects.equals(lastCompleted, other.lastCompleted)
               && Objects.equals(firstFailing, other.firstFailing)
               && Objects.equals(lastSuccess, other.lastSuccess);
    }

    /** Information about a particular triggering or completion of a run of a job. This is immutable. */
    public static class JobRun {

        private final long id;
        private final Version platform;
        private final ApplicationVersion application;
        private final Optional<Version> sourcePlatform;
        private final Optional<ApplicationVersion> sourceApplication;
        private final String reason;
        private final Instant at;

        public JobRun(long id, Version platform, ApplicationVersion application,
                      Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication,
                      String reason, Instant at) {
            this.id = id;
            this.platform = requireNonNull(platform);
            this.application = requireNonNull(application);
            // source* are not null-checked — presumably validated by callers; confirm.
            this.sourcePlatform = sourcePlatform;
            this.sourceApplication = sourceApplication;
            this.reason = requireNonNull(reason);
            this.at = requireNonNull(at);
        }

        /** Creates a run representing a triggering; the run id is not yet known, so it is set to -1. */
        public static JobRun triggering(Version platform, ApplicationVersion application,
                                        Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication,
                                        String reason, Instant at) {
            return new JobRun(-1, platform, application, sourcePlatform, sourceApplication, reason, at);
        }

        /** Returns a copy of this run with the given id and completion time. */
        public JobRun completion(long id, Instant at) {
            return new JobRun(id, platform, application, sourcePlatform, sourceApplication, reason, at);
        }

        /** Returns the id of this run of this job, or -1 if not known */
        public long id() { return id; }

        /** Returns the Vespa version used on this run */
        public Version platform() { return platform; }

        /** Returns the Vespa version this run upgraded from, if already deployed */
        public Optional<Version> sourcePlatform() { return sourcePlatform; }

        /** Returns the application version used in this run */
        public ApplicationVersion application() { return application; }

        /** Returns the application version this run upgraded from, if already deployed */
        public Optional<ApplicationVersion> sourceApplication() { return sourceApplication; }

        /** Returns a human-readable reason for this particular job run */
        public String reason() { return reason; }

        /** Returns the time of this triggering or completion */
        public Instant at() { return at; }

        @Override
        public String toString() {
            return "job run " + id + " of version " + platform +
                   (sourcePlatform.map(version -> " (" + version + ")").orElse("")) +
                   " " + application.id() +
                   (sourceApplication.map(version -> " (" + version.id() + ")").orElse("")) +
                   " at " + at;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (!(o instanceof JobRun)) return false;
            JobRun run = (JobRun) o;
            if (id != run.id) return false;
            if (!platform.equals(run.platform)) return false;
            if (!application.equals(run.application)) return false;
            if (!sourcePlatform.equals(run.sourcePlatform)) return false;
            if (!sourceApplication.equals(run.sourceApplication)) return false;
            // NOTE(review): reason is excluded from equals and hashCode — confirm intentional.
            return at.equals(run.at);
        }

        @Override
        public int hashCode() {
            int result = (int) (id ^ (id >>> 32));
            result = 31 * result + platform.hashCode();
            result = 31 * result + application.hashCode();
            result = 31 * result + sourcePlatform.hashCode();
            result = 31 * result + sourceApplication.hashCode();
            result = 31 * result + at.hashCode();
            return result;
        }

    }

}
// Immutable status of one deployment job of an application: its last triggering,
// last completion, the run on which it started failing, and its last success.
class JobStatus {

    private final DeploymentJobs.JobType type;
    private final Optional<JobRun> lastTriggered;
    private final Optional<JobRun> lastCompleted;
    private final Optional<JobRun> firstFailing;
    private final Optional<JobRun> lastSuccess;
    private final Optional<DeploymentJobs.JobError> jobError;

    /**
     * Used by the persistence layer (only) to create a complete JobStatus instance.
     * Other creation should be by using initial- and with- methods.
     */
    public JobStatus(DeploymentJobs.JobType type, Optional<DeploymentJobs.JobError> jobError,
                     Optional<JobRun> lastTriggered, Optional<JobRun> lastCompleted,
                     Optional<JobRun> firstFailing, Optional<JobRun> lastSuccess) {
        requireNonNull(type, "jobType cannot be null");
        requireNonNull(jobError, "jobError cannot be null");
        requireNonNull(lastTriggered, "lastTriggered cannot be null");
        requireNonNull(lastCompleted, "lastCompleted cannot be null");
        requireNonNull(firstFailing, "firstFailing cannot be null");
        requireNonNull(lastSuccess, "lastSuccess cannot be null");
        this.type = type;
        this.jobError = jobError;
        // The component job never stores a triggering — presumably it is triggered
        // outside the controller; confirm before relying on this.
        this.lastTriggered = type == DeploymentJobs.JobType.component ? Optional.empty() : lastTriggered;
        this.lastCompleted = lastCompleted;
        this.firstFailing = firstFailing;
        this.lastSuccess = lastSuccess;
    }

    /** Returns an empty job status */
    public static JobStatus initial(DeploymentJobs.JobType type) {
        return new JobStatus(type, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty());
    }

    // Returns a copy of this with a new triggering built from the given change data.
    public JobStatus withTriggering(Version platform, ApplicationVersion application, Optional<Deployment> deployment, String reason, Instant triggeredAt) {
        return withTriggering(JobRun.triggering(platform, application,
                                                deployment.map(Deployment::version),
                                                deployment.map(Deployment::applicationVersion),
                                                reason, triggeredAt));
    }

    // Returns a copy of this with the given run as the last triggering.
    public JobStatus withTriggering(JobRun jobRun) {
        return new JobStatus(type, jobError, Optional.of(jobRun), lastCompleted, firstFailing, lastSuccess);
    }

    // NOTE(review): unchecked Optional.get() — throws NoSuchElementException if this
    // job was never triggered; callers apparently guarantee a prior triggering.
    public JobStatus withCompletion(long runId, Optional<DeploymentJobs.JobError> jobError, Instant completion) {
        return withCompletion(lastTriggered.get().completion(runId, completion), jobError);
    }

    // Returns a copy of this with the given run as the last completion.
    public JobStatus withCompletion(JobRun completion, Optional<DeploymentJobs.JobError> jobError) {
        // firstFailing marks the start of the current failure streak: set on the
        // first failing completion, cleared again on any success.
        Optional<JobRun> firstFailing = this.firstFailing;
        if (jobError.isPresent() && ! this.firstFailing.isPresent())
            firstFailing = Optional.of(completion);
        Optional<JobRun> lastSuccess = this.lastSuccess;
        if ( ! jobError.isPresent()) {
            lastSuccess = Optional.of(completion);
            firstFailing = Optional.empty(); // a success ends the failure streak
        }
        return new JobStatus(type, jobError, lastTriggered, Optional.of(completion), firstFailing, lastSuccess);
    }

    public DeploymentJobs.JobType type() { return type; }

    /** Returns true unless this job last completed with a failure */
    public boolean isSuccess() { return lastCompleted().isPresent() && ! jobError.isPresent(); }

    /** The error of the last completion, or empty if the last run succeeded */
    public Optional<DeploymentJobs.JobError> jobError() { return jobError; }

    /** Returns whether this last failed on out of capacity */
    // NOTE(review): the javadoc above has no method under it; isOutOfCapacity()
    // appears to exist elsewhere in the file — reattach or remove this javadoc.

    /**
     * Returns the last triggering of this job, or empty if the controller has never triggered it
     * and not seen a deployment for it
     */
    public Optional<JobRun> lastTriggered() { return lastTriggered; }

    /** Returns the last completion of this job (whether failing or succeeding), or empty if it never completed */
    public Optional<JobRun> lastCompleted() { return lastCompleted; }

    /** Returns the run when this started failing, or empty if it is not currently failing */
    public Optional<JobRun> firstFailing() { return firstFailing; }

    /** Returns the run when this last succeeded, or empty if it has never succeeded */
    public Optional<JobRun> lastSuccess() { return lastSuccess; }

    @Override
    public String toString() {
        return "job status of " + type + "[ " +
               "last triggered: " + lastTriggered.map(JobRun::toString).orElse("(never)") +
               ", last completed: " + lastCompleted.map(JobRun::toString).orElse("(never)") +
               ", first failing: " + firstFailing.map(JobRun::toString).orElse("(not failing)") +
               ", lastSuccess: " + lastSuccess.map(JobRun::toString).orElse("(never)") + "]";
    }

    @Override
    public int hashCode() {
        return Objects.hash(type, jobError, lastTriggered, lastCompleted, firstFailing, lastSuccess);
    }

    @Override
    public boolean equals(Object o) {
        if (o == this) return true;
        if ( ! ( o instanceof JobStatus)) return false;
        JobStatus other = (JobStatus)o;
        return Objects.equals(type, other.type)
               && Objects.equals(jobError, other.jobError)
               && Objects.equals(lastTriggered, other.lastTriggered)
               && Objects.equals(lastCompleted, other.lastCompleted)
               && Objects.equals(firstFailing, other.firstFailing)
               && Objects.equals(lastSuccess, other.lastSuccess);
    }

    /** Information about a particular triggering or completion of a run of a job. This is immutable. */
    public static class JobRun {

        private final long id;
        private final Version platform;
        private final ApplicationVersion application;
        private final Optional<Version> sourcePlatform;
        private final Optional<ApplicationVersion> sourceApplication;
        private final String reason;
        private final Instant at;

        public JobRun(long id, Version platform, ApplicationVersion application,
                      Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication,
                      String reason, Instant at) {
            this.id = id;
            this.platform = requireNonNull(platform);
            this.application = requireNonNull(application);
            // source* are not null-checked — presumably validated by callers; confirm.
            this.sourcePlatform = sourcePlatform;
            this.sourceApplication = sourceApplication;
            this.reason = requireNonNull(reason);
            this.at = requireNonNull(at);
        }

        /** Creates a run representing a triggering; the run id is not yet known, so it is set to -1. */
        public static JobRun triggering(Version platform, ApplicationVersion application,
                                        Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication,
                                        String reason, Instant at) {
            return new JobRun(-1, platform, application, sourcePlatform, sourceApplication, reason, at);
        }

        /** Returns a copy of this run with the given id and completion time. */
        public JobRun completion(long id, Instant at) {
            return new JobRun(id, platform, application, sourcePlatform, sourceApplication, reason, at);
        }

        /** Returns the id of this run of this job, or -1 if not known */
        public long id() { return id; }

        /** Returns the Vespa version used on this run */
        public Version platform() { return platform; }

        /** Returns the Vespa version this run upgraded from, if already deployed */
        public Optional<Version> sourcePlatform() { return sourcePlatform; }

        /** Returns the application version used in this run */
        public ApplicationVersion application() { return application; }

        /** Returns the application version this run upgraded from, if already deployed */
        public Optional<ApplicationVersion> sourceApplication() { return sourceApplication; }

        /** Returns a human-readable reason for this particular job run */
        public String reason() { return reason; }

        /** Returns the time of this triggering or completion */
        public Instant at() { return at; }

        @Override
        public String toString() {
            return "job run " + id + " of version " + platform +
                   (sourcePlatform.map(version -> " (" + version + ")").orElse("")) +
                   " " + application.id() +
                   (sourceApplication.map(version -> " (" + version.id() + ")").orElse("")) +
                   " at " + at;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (!(o instanceof JobRun)) return false;
            JobRun run = (JobRun) o;
            if (id != run.id) return false;
            if (!platform.equals(run.platform)) return false;
            if (!application.equals(run.application)) return false;
            if (!sourcePlatform.equals(run.sourcePlatform)) return false;
            if (!sourceApplication.equals(run.sourceApplication)) return false;
            // NOTE(review): reason is excluded from equals and hashCode — confirm intentional.
            return at.equals(run.at);
        }

        @Override
        public int hashCode() {
            int result = (int) (id ^ (id >>> 32));
            result = 31 * result + platform.hashCode();
            result = 31 * result + application.hashCode();
            result = 31 * result + sourcePlatform.hashCode();
            result = 31 * result + sourceApplication.hashCode();
            result = 31 * result + at.hashCode();
            return result;
        }

    }

}
It would be good to assert, before this point, that the job was not already running.
// Verifies that a deployment step counts as complete precisely when it should:
// an old triggering or deployment must not satisfy a newer change.
public void stepIsCompletePreciselyWhenItShouldBe() {
    DeploymentTester tester = new DeploymentTester();
    Application application = tester.createApplication("app1", "tenant1", 1, 1L);
    // Re-reads the application so assertions always see the latest stored state.
    Supplier<Application> app = () -> tester.application(application.id());
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .environment(Environment.prod)
            .region("us-central-1")
            .region("eu-west-1")
            .upgradePolicy("canary")
            .build();
    tester.deployCompletely(application, applicationPackage);

    // Upgrade to 7.2, but fail and then cancel it in us-central-1,
    // leaving 7.2 deployed there while the change is gone.
    Version v2 = new Version("7.2");
    tester.upgradeSystem(v2);
    tester.completeUpgradeWithError(application, v2, applicationPackage, productionUsCentral1);
    tester.deploy(productionUsCentral1, application, applicationPackage);
    tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1);
    assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version());
    tester.deploymentTrigger().cancelChange(application.id(), false);
    tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1);
    Instant triggered = app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at();
    tester.clock().advance(Duration.ofHours(1));

    // A 7.1 upgrade arrives; us-central-1 already has 7.2 > 7.1, so its job
    // must not be re-triggered for this change.
    Version v1 = new Version("7.1");
    tester.upgradeSystem(v1);
    assertEquals(Change.of(v1), app.get().change());
    tester.deployAndNotify(application, applicationPackage, true, systemTest);
    tester.deployAndNotify(application, applicationPackage, true, stagingTest);
    assertEquals(triggered, app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at());

    // Fail eu-west-1, then submit a new application revision.
    tester.deployAndNotify(application, applicationPackage, false, productionEuWest1);
    tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit();
    tester.deployAndNotify(application, applicationPackage, false, productionEuWest1);
    tester.deployAndNotify(application, applicationPackage, true, systemTest);
    tester.deployAndNotify(application, applicationPackage, true, stagingTest);

    // After the triggering timeout, us-central-1 is re-triggered for the new revision
    // (build 42), still on platform 7.2.
    tester.clock().advance(Duration.ofHours(1).plus(Duration.ofSeconds(1)));
    tester.readyJobTrigger().maintain();
    assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version());
    assertEquals(Long.valueOf(42L), app.get().deployments().get(productionUsCentral1.zone(main).get()).applicationVersion().buildNumber().get());
    assertNotEquals(triggered, app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at());

    // The job fails, but the deployment itself succeeds (build 43 deployed).
    tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1);
    tester.deploy(productionUsCentral1, application, applicationPackage);
    assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version());
    assertEquals(Long.valueOf(43), app.get().deployments().get(productionUsCentral1.zone(main).get()).applicationVersion().buildNumber().get());

    // Since the wanted revision is already deployed, the step is complete and the
    // job is not re-triggered; the rest of the pipeline finishes the change.
    tester.clock().advance(Duration.ofHours(2).plus(Duration.ofSeconds(1)));
    tester.readyJobTrigger().maintain();
    tester.assertNotRunning(productionUsCentral1, application.id());
    tester.deployAndNotify(application, empty(), true, systemTest);
    tester.deployAndNotify(application, empty(), true, stagingTest);
    tester.deployAndNotify(application, applicationPackage, true, productionEuWest1);
    assertFalse(app.get().change().isPresent());
    assertFalse(app.get().deploymentJobs().jobStatus().get(productionUsCentral1).isSuccess());
}
tester.clock().advance(Duration.ofHours(1).plus(Duration.ofSeconds(1)));
// Verifies that a deployment step counts as complete precisely when it should:
// an old triggering or deployment must not satisfy a newer change.
public void stepIsCompletePreciselyWhenItShouldBe() {
    DeploymentTester tester = new DeploymentTester();
    Application application = tester.createApplication("app1", "tenant1", 1, 1L);
    // Re-reads the application so assertions always see the latest stored state.
    Supplier<Application> app = () -> tester.application(application.id());
    ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
            .environment(Environment.prod)
            .region("us-central-1")
            .region("eu-west-1")
            .upgradePolicy("canary")
            .build();
    tester.deployCompletely(application, applicationPackage);

    // Upgrade to 7.2, but fail and then cancel it in us-central-1,
    // leaving 7.2 deployed there while the change is gone.
    Version v2 = new Version("7.2");
    tester.upgradeSystem(v2);
    tester.completeUpgradeWithError(application, v2, applicationPackage, productionUsCentral1);
    tester.deploy(productionUsCentral1, application, applicationPackage);
    tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1);
    assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version());
    tester.deploymentTrigger().cancelChange(application.id(), false);
    tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1);
    Instant triggered = app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at();
    tester.clock().advance(Duration.ofHours(1));

    // A 7.1 upgrade arrives; us-central-1 already has 7.2 > 7.1, so its job
    // must not be re-triggered for this change.
    Version v1 = new Version("7.1");
    tester.upgradeSystem(v1);
    assertEquals(Change.of(v1), app.get().change());
    tester.deployAndNotify(application, applicationPackage, true, systemTest);
    tester.deployAndNotify(application, applicationPackage, true, stagingTest);
    assertEquals(triggered, app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at());

    // Fail eu-west-1, then submit a new application revision.
    tester.deployAndNotify(application, applicationPackage, false, productionEuWest1);
    tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit();
    tester.deployAndNotify(application, applicationPackage, false, productionEuWest1);
    tester.deployAndNotify(application, applicationPackage, true, systemTest);
    tester.deployAndNotify(application, applicationPackage, true, stagingTest);

    // us-central-1 is re-triggered for the new revision (build 42), still on 7.2.
    tester.assertRunning(productionUsCentral1, application.id());
    assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version());
    assertEquals(Long.valueOf(42L), app.get().deployments().get(productionUsCentral1.zone(main).get()).applicationVersion().buildNumber().get());
    assertNotEquals(triggered, app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at());

    // The job fails, but the deployment itself succeeds (build 43 deployed).
    tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1);
    tester.deploy(productionUsCentral1, application, applicationPackage);
    assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version());
    assertEquals(Long.valueOf(43), app.get().deployments().get(productionUsCentral1.zone(main).get()).applicationVersion().buildNumber().get());

    // Since the wanted revision is already deployed, the step is complete and the
    // job is not re-triggered; the rest of the pipeline finishes the change.
    tester.clock().advance(Duration.ofHours(2).plus(Duration.ofSeconds(1)));
    tester.readyJobTrigger().maintain();
    tester.assertNotRunning(productionUsCentral1, application.id());
    tester.deployAndNotify(application, true, systemTest);
    tester.deployAndNotify(application, true, stagingTest);
    tester.deployAndNotify(application, applicationPackage, true, productionEuWest1);
    assertFalse(app.get().change().isPresent());
    assertFalse(app.get().deploymentJobs().jobStatus().get(productionUsCentral1).isSuccess());
}
class DeploymentTriggerTest { @Test public void testTriggerFailing() { DeploymentTester tester = new DeploymentTester(); Application app = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .environment(Environment.prod) .region("us-west-1") .build(); Version version = new Version(5, 1); tester.upgradeSystem(version); tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1); version = new Version(5, 2); tester.upgradeSystem(version); tester.buildService().remove(buildJob(app, stagingTest)); tester.readyJobTrigger().maintain(); assertEquals("Retried dead job", 2, tester.buildService().jobs().size()); tester.assertRunning(stagingTest, app.id()); tester.deployAndNotify(app, applicationPackage, true, stagingTest); tester.assertRunning(systemTest, app.id()); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest); assertEquals("Job is retried on failure", 1, tester.buildService().jobs().size()); tester.deployAndNotify(app, applicationPackage, true, JobType.systemTest); tester.assertRunning(productionUsWest1, app.id()); } @Test public void deploymentSpecDecidesTriggerOrder() { DeploymentTester tester = new DeploymentTester(); TenantName tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); MockBuildService mockBuildService = tester.buildService(); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") 
.region("us-central-1") .region("us-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, applicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void deploymentsSpecWithDelays() { DeploymentTester tester = new DeploymentTester(); MockBuildService mockBuildService = tester.buildService(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(2)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(0, mockBuildService.jobs().size()); tester.clock().advance(Duration.ofSeconds(30)); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(1, mockBuildService.jobs().size()); tester.assertRunning(productionUsWest1, application.id()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(1, mockBuildService.jobs().size()); tester.assertRunning(productionUsWest1, application.id()); tester.deployAndNotify(application, 
applicationPackage, true, productionUsWest1); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("No more jobs triggered at this time", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("No more jobs triggered at this time", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(1)); tester.deploymentTrigger().triggerReadyJobs(); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertEquals(2, tester.buildService().jobs().size()); tester.assertRunning(productionUsEast3, application.id()); tester.assertRunning(productionUsWest1, application.id()); tester.deploy(JobType.productionUsWest1, application, applicationPackage, false); tester.jobCompletion(JobType.productionUsWest1).application(application).submit(); assertEquals("One job still running.", JobType.productionUsEast3.jobName(), 
tester.buildService().jobs().get(0).jobName()); tester.deploy(JobType.productionUsEast3, application, applicationPackage, false); tester.jobCompletion(JobType.productionUsEast3).application(application).submit(); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty()); } @Test public void parallelDeploymentCompletesOutOfOrder() { DeploymentTester tester = new DeploymentTester(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .parallel("us-east-3", "us-west-1") .build(); Application app = tester.createApplication("app1", "tenant1", 1, 11L); tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deploy(DeploymentJobs.JobType.productionUsWest1, app, applicationPackage); tester.jobCompletion(DeploymentJobs.JobType.productionUsWest1).application(app).submit(); assertTrue("Change is present as not all jobs are complete", tester.applications().require(app.id()).change().isPresent()); tester.deploy(DeploymentJobs.JobType.productionUsEast3, app, applicationPackage); tester.jobCompletion(JobType.productionUsEast3).application(app).submit(); assertFalse("Change has been deployed", tester.applications().require(app.id()).change().isPresent()); } @Test public void testSuccessfulDeploymentApplicationPackageChanged() { DeploymentTester tester = new DeploymentTester(); TenantName tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); MockBuildService mockBuildService = tester.buildService(); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage 
previousApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); ApplicationPackage newApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .region("eu-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(newApplicationPackage).submit(); tester.deployAndNotify(application, newApplicationPackage, true, JobType.systemTest); tester.deploy(JobType.stagingTest, application, previousApplicationPackage, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsWest1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void testBlockRevisionChange() { ManualClock clock = new ManualClock(Instant.parse("2017-09-26T17:30:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); ReadyJobsTrigger readyJobsTrigger = new ReadyJobsTrigger(tester.controller(), Duration.ofHours(1), new JobControl(tester.controllerTester().curator())); Version version = Version.fromString("5.0"); tester.upgradeSystem(version); ApplicationPackageBuilder applicationPackageBuilder = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(true, false, "tue", "18-19", "UTC") .region("us-west-1") .region("us-central-1") .region("us-east-3"); Application app = tester.createAndDeploy("app1", 1, applicationPackageBuilder.build()); tester.clock().advance(Duration.ofHours(1)); 
readyJobsTrigger.run(); assertEquals(0, tester.buildService().jobs().size()); String searchDefinition = "search test {\n" + " document test {\n" + " field test type string {\n" + " }\n" + " }\n" + "}\n"; ApplicationPackage changedApplication = applicationPackageBuilder.searchDefinition(searchDefinition).build(); tester.jobCompletion(component) .application(app) .nextBuildNumber() .sourceRevision(new SourceRevision("repository1", "master", "cafed00d")) .uploadArtifact(changedApplication) .submit(); assertTrue(tester.applications().require(app.id()).change().isPresent()); tester.deployAndNotify(app, changedApplication, true, systemTest); tester.deployAndNotify(app, changedApplication, true, stagingTest); readyJobsTrigger.run(); assertEquals(0, tester.buildService().jobs().size()); tester.clock().advance(Duration.ofHours(2)); tester.deploymentTrigger().triggerReadyJobs(); tester.deployAndNotify(app, changedApplication, true, stagingTest); assertEquals(singletonList(buildJob(app, productionUsWest1)), tester.buildService().jobs()); } @Test public void testCompletionOfPartOfChangeDuringBlockWindow() { ManualClock clock = new ManualClock(Instant.parse("2017-09-26T17:30:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .blockChange(false, true, "tue", "18", "UTC") .region("us-west-1") .region("us-east-3") .build(); Application application = tester.createAndDeploy("app1", 1, applicationPackage); Version v1 = Version.fromString("6.1"); Version v2 = Version.fromString("6.2"); tester.upgradeSystem(v2); tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, systemTest); clock.advance(Duration.ofHours(1)); tester.deployAndNotify(application, applicationPackage, true, 
productionUsWest1); assertEquals((Long) BuildJob.defaultBuildNumber, tester.application(application.id()).deploymentJobs().jobStatus() .get(productionUsWest1).lastSuccess().get().application().buildNumber().get()); assertEquals((Long) (BuildJob.defaultBuildNumber + 1), tester.application(application.id()).outstandingChange().application().get().buildNumber().get()); tester.readyJobTrigger().maintain(); assertTrue(tester.buildService().jobs().isEmpty()); tester.jobCompletion(component).application(application).nextBuildNumber().nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, productionUsWest1); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, productionUsEast3); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); assertTrue(tester.buildService().jobs().isEmpty()); assertEquals(Change.of(v2), tester.application(application.id()).change()); clock.advance(Duration.ofHours(1)); tester.readyJobTrigger().maintain(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, productionUsEast3); assertFalse(tester.application(application.id()).change().isPresent()); assertFalse(tester.application(application.id()).outstandingChange().isPresent()); } @Test public void testUpgradingButNoJobStarted() { DeploymentTester tester = new DeploymentTester(); ReadyJobsTrigger readyJobsTrigger = new ReadyJobsTrigger(tester.controller(), Duration.ofHours(1), new JobControl(tester.controllerTester().curator())); Application app 
= tester.createAndDeploy("default0", 3, "default"); tester.controller().applications().lockOrThrow(app.id(), locked -> { tester.controller().applications().store(locked.withChange(Change.of(Version.fromString("6.2")))); }); assertEquals(0, tester.buildService().jobs().size()); readyJobsTrigger.run(); tester.assertRunning(systemTest, app.id()); tester.assertRunning(stagingTest, app.id()); } @Test public void applicationVersionIsNotDowngraded() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .region("eu-west-1") .build(); tester.deployCompletely(application, applicationPackage); tester.completeDeploymentWithError(application, applicationPackage, BuildJob.defaultBuildNumber + 1, productionUsCentral1); tester.deployAndNotify(application, empty(), false, productionUsCentral1); tester.deploy(productionUsCentral1, application, empty(), false); ApplicationVersion appVersion1 = ApplicationVersion.from(BuildJob.defaultSourceRevision, BuildJob.defaultBuildNumber + 1); assertEquals(appVersion1, app.get().deployments().get(ZoneId.from("prod.us-central-1")).applicationVersion()); tester.deploymentTrigger().cancelChange(application.id(), true); assertEquals(Change.of(appVersion1), app.get().change()); tester.deploymentTrigger().cancelChange(application.id(), false); assertEquals(Change.empty(), app.get().change()); Version version1 = new Version("6.2"); tester.upgradeSystem(version1); tester.jobCompletion(productionUsCentral1).application(application).unsuccessful().submit(); tester.deployAndNotify(application, empty(), true, systemTest); tester.deployAndNotify(application, empty(), true, stagingTest); tester.deployAndNotify(application, empty(), false, productionUsCentral1); 
tester.deployAndNotify(application, empty(), true, systemTest); tester.deployAndNotify(application, empty(), true, stagingTest); tester.deployAndNotify(application, empty(), true, productionUsCentral1); tester.deployAndNotify(application, empty(), true, productionEuWest1); assertEquals(appVersion1, app.get().deployments().get(ZoneId.from("prod.us-central-1")).applicationVersion()); } @Test @Test public void eachDeployTargetIsTested() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .parallel("eu-west-1", "us-east-3") .build(); tester.deployCompletely(application, applicationPackage); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.upgradeSystem(v2); tester.deployAndNotify(application, empty(), true, systemTest); tester.deployAndNotify(application, empty(), true, stagingTest); tester.deploymentTrigger().cancelChange(application.id(), true); tester.deploy(productionEuWest1, application, applicationPackage); assertEquals(v2, app.get().deployments().get(productionEuWest1.zone(main).get()).version()); assertEquals(v1, app.get().deployments().get(productionUsEast3.zone(main).get()).version()); tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); Version firstTested = app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform(); assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); tester.deployAndNotify(application, empty(), true, systemTest); tester.deployAndNotify(application, empty(), true, stagingTest); assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform()); 
assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); tester.deployAndNotify(application, applicationPackage, false, productionEuWest1); tester.deployAndNotify(application, applicationPackage, false, productionUsEast3); tester.triggerUntilQuiescence(); assertNotEquals(firstTested, app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform()); assertNotEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); tester.deployAndNotify(application, empty(), true, systemTest); tester.deployAndNotify(application, empty(), true, stagingTest); tester.deployAndNotify(application, empty(), false, productionEuWest1); tester.deployAndNotify(application, empty(), false, productionUsEast3); tester.deployAndNotify(application, empty(), true, productionUsEast3); tester.deployAndNotify(application, empty(), true, productionEuWest1); assertFalse(app.get().change().isPresent()); assertEquals(43, app.get().deploymentJobs().jobStatus().get(productionEuWest1).lastSuccess().get().application().buildNumber().get().longValue()); assertEquals(43, app.get().deploymentJobs().jobStatus().get(productionUsEast3).lastSuccess().get().application().buildNumber().get().longValue()); } @Test public void eachDifferentUpgradeCombinationIsTested() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("eu-west-1", "us-east-3") .build(); tester.deployCompletely(application, applicationPackage); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.upgradeSystem(v2); tester.deployAndNotify(application, empty(), true, systemTest); tester.deployAndNotify(application, 
empty(), true, stagingTest); tester.deployAndNotify(application, empty(), true, productionUsCentral1); tester.deployAndNotify(application, empty(), true, productionEuWest1); tester.deployAndNotify(application, empty(), false, productionUsEast3); assertEquals(v2, app.get().deployments().get(ZoneId.from("prod", "us-central-1")).version()); assertEquals(v2, app.get().deployments().get(ZoneId.from("prod", "eu-west-1")).version()); assertEquals(v1, app.get().deployments().get(ZoneId.from("prod", "us-east-3")).version()); Version v3 = new Version("6.3"); tester.upgradeSystem(v3); tester.deployAndNotify(application, empty(), false, productionUsEast3); tester.deployAndNotify(application, empty(), true, systemTest); tester.deployAndNotify(application, empty(), true, stagingTest); assertEquals(v2, app.get().deploymentJobs().jobStatus().get(stagingTest).lastSuccess().get().sourcePlatform().get()); tester.deployAndNotify(application, empty(), true, productionUsCentral1); assertEquals(v1, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().sourcePlatform().get()); tester.deployAndNotify(application, empty(), true, stagingTest); tester.deployAndNotify(application, empty(), true, productionEuWest1); tester.deployAndNotify(application, empty(), true, productionUsEast3); } }
class DeploymentTriggerTest { @Test public void testTriggerFailing() { DeploymentTester tester = new DeploymentTester(); Application app = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .environment(Environment.prod) .region("us-west-1") .build(); Version version = new Version(5, 1); tester.upgradeSystem(version); tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1); version = new Version(5, 2); tester.upgradeSystem(version); tester.buildService().remove(buildJob(app, stagingTest)); tester.readyJobTrigger().maintain(); assertEquals("Retried dead job", 2, tester.buildService().jobs().size()); tester.assertRunning(stagingTest, app.id()); tester.deployAndNotify(app, applicationPackage, true, stagingTest); tester.assertRunning(systemTest, app.id()); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest); assertEquals("Job is retried on failure", 1, tester.buildService().jobs().size()); tester.deployAndNotify(app, applicationPackage, true, JobType.systemTest); tester.assertRunning(productionUsWest1, app.id()); } @Test public void deploymentSpecDecidesTriggerOrder() { DeploymentTester tester = new DeploymentTester(); TenantName tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); MockBuildService mockBuildService = tester.buildService(); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") 
.region("us-central-1") .region("us-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, applicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void deploymentsSpecWithDelays() { DeploymentTester tester = new DeploymentTester(); MockBuildService mockBuildService = tester.buildService(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(2)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(0, mockBuildService.jobs().size()); tester.clock().advance(Duration.ofSeconds(30)); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(1, mockBuildService.jobs().size()); tester.assertRunning(productionUsWest1, application.id()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(1, mockBuildService.jobs().size()); tester.assertRunning(productionUsWest1, application.id()); tester.deployAndNotify(application, 
applicationPackage, true, productionUsWest1); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("No more jobs triggered at this time", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("No more jobs triggered at this time", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(1)); tester.deploymentTrigger().triggerReadyJobs(); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertEquals(2, tester.buildService().jobs().size()); tester.assertRunning(productionUsEast3, application.id()); tester.assertRunning(productionUsWest1, application.id()); tester.deploy(JobType.productionUsWest1, application, applicationPackage, false); tester.jobCompletion(JobType.productionUsWest1).application(application).submit(); assertEquals("One job still running.", JobType.productionUsEast3.jobName(), 
tester.buildService().jobs().get(0).jobName()); tester.deploy(JobType.productionUsEast3, application, applicationPackage, false); tester.jobCompletion(JobType.productionUsEast3).application(application).submit(); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty()); } @Test public void parallelDeploymentCompletesOutOfOrder() { DeploymentTester tester = new DeploymentTester(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .parallel("us-east-3", "us-west-1") .build(); Application app = tester.createApplication("app1", "tenant1", 1, 11L); tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deploy(DeploymentJobs.JobType.productionUsWest1, app, applicationPackage); tester.jobCompletion(DeploymentJobs.JobType.productionUsWest1).application(app).submit(); assertTrue("Change is present as not all jobs are complete", tester.applications().require(app.id()).change().isPresent()); tester.deploy(DeploymentJobs.JobType.productionUsEast3, app, applicationPackage); tester.jobCompletion(JobType.productionUsEast3).application(app).submit(); assertFalse("Change has been deployed", tester.applications().require(app.id()).change().isPresent()); } @Test public void testSuccessfulDeploymentApplicationPackageChanged() { DeploymentTester tester = new DeploymentTester(); TenantName tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); MockBuildService mockBuildService = tester.buildService(); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage 
previousApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); ApplicationPackage newApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .region("eu-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(newApplicationPackage).submit(); tester.deployAndNotify(application, newApplicationPackage, true, JobType.systemTest); tester.deploy(JobType.stagingTest, application, previousApplicationPackage, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsWest1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void testBlockRevisionChange() { ManualClock clock = new ManualClock(Instant.parse("2017-09-26T17:30:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); ReadyJobsTrigger readyJobsTrigger = new ReadyJobsTrigger(tester.controller(), Duration.ofHours(1), new JobControl(tester.controllerTester().curator())); Version version = Version.fromString("5.0"); tester.upgradeSystem(version); ApplicationPackageBuilder applicationPackageBuilder = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(true, false, "tue", "18-19", "UTC") .region("us-west-1") .region("us-central-1") .region("us-east-3"); Application app = tester.createAndDeploy("app1", 1, applicationPackageBuilder.build()); tester.clock().advance(Duration.ofHours(1)); 
readyJobsTrigger.run(); assertEquals(0, tester.buildService().jobs().size()); String searchDefinition = "search test {\n" + " document test {\n" + " field test type string {\n" + " }\n" + " }\n" + "}\n"; ApplicationPackage changedApplication = applicationPackageBuilder.searchDefinition(searchDefinition).build(); tester.jobCompletion(component) .application(app) .nextBuildNumber() .sourceRevision(new SourceRevision("repository1", "master", "cafed00d")) .uploadArtifact(changedApplication) .submit(); assertTrue(tester.applications().require(app.id()).change().isPresent()); tester.deployAndNotify(app, changedApplication, true, systemTest); tester.deployAndNotify(app, changedApplication, true, stagingTest); readyJobsTrigger.run(); assertEquals(0, tester.buildService().jobs().size()); tester.clock().advance(Duration.ofHours(2)); tester.deploymentTrigger().triggerReadyJobs(); tester.deployAndNotify(app, changedApplication, true, stagingTest); assertEquals(singletonList(buildJob(app, productionUsWest1)), tester.buildService().jobs()); } @Test public void testCompletionOfPartOfChangeDuringBlockWindow() { ManualClock clock = new ManualClock(Instant.parse("2017-09-26T17:30:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .blockChange(false, true, "tue", "18", "UTC") .region("us-west-1") .region("us-east-3") .build(); Application application = tester.createAndDeploy("app1", 1, applicationPackage); Version v1 = Version.fromString("6.1"); Version v2 = Version.fromString("6.2"); tester.upgradeSystem(v2); tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, systemTest); clock.advance(Duration.ofHours(1)); tester.deployAndNotify(application, applicationPackage, true, 
productionUsWest1); assertEquals((Long) BuildJob.defaultBuildNumber, tester.application(application.id()).deploymentJobs().jobStatus() .get(productionUsWest1).lastSuccess().get().application().buildNumber().get()); assertEquals((Long) (BuildJob.defaultBuildNumber + 1), tester.application(application.id()).outstandingChange().application().get().buildNumber().get()); tester.readyJobTrigger().maintain(); assertTrue(tester.buildService().jobs().isEmpty()); tester.jobCompletion(component).application(application).nextBuildNumber().nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, productionUsWest1); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, productionUsEast3); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); assertTrue(tester.buildService().jobs().isEmpty()); assertEquals(Change.of(v2), tester.application(application.id()).change()); clock.advance(Duration.ofHours(1)); tester.readyJobTrigger().maintain(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, productionUsEast3); assertFalse(tester.application(application.id()).change().isPresent()); assertFalse(tester.application(application.id()).outstandingChange().isPresent()); } @Test public void testUpgradingButNoJobStarted() { DeploymentTester tester = new DeploymentTester(); ReadyJobsTrigger readyJobsTrigger = new ReadyJobsTrigger(tester.controller(), Duration.ofHours(1), new JobControl(tester.controllerTester().curator())); Application app 
= tester.createAndDeploy("default0", 3, "default"); tester.controller().applications().lockOrThrow(app.id(), locked -> { tester.controller().applications().store(locked.withChange(Change.of(Version.fromString("6.2")))); }); assertEquals(0, tester.buildService().jobs().size()); readyJobsTrigger.run(); tester.assertRunning(systemTest, app.id()); tester.assertRunning(stagingTest, app.id()); } @Test public void applicationVersionIsNotDowngraded() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .region("eu-west-1") .build(); tester.deployCompletely(application, applicationPackage); tester.completeDeploymentWithError(application, applicationPackage, BuildJob.defaultBuildNumber + 1, productionUsCentral1); tester.deployAndNotify(application, false, productionUsCentral1); tester.deploy(productionUsCentral1, application, Optional.empty(), false); ApplicationVersion appVersion1 = ApplicationVersion.from(BuildJob.defaultSourceRevision, BuildJob.defaultBuildNumber + 1); assertEquals(appVersion1, app.get().deployments().get(ZoneId.from("prod.us-central-1")).applicationVersion()); tester.deploymentTrigger().cancelChange(application.id(), true); assertEquals(Change.of(appVersion1), app.get().change()); tester.deploymentTrigger().cancelChange(application.id(), false); assertEquals(Change.empty(), app.get().change()); Version version1 = new Version("6.2"); tester.upgradeSystem(version1); tester.jobCompletion(productionUsCentral1).application(application).unsuccessful().submit(); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, false, productionUsCentral1); tester.deployAndNotify(application, true, systemTest); 
tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, true, productionUsCentral1); tester.deployAndNotify(application, true, productionEuWest1); assertEquals(appVersion1, app.get().deployments().get(ZoneId.from("prod.us-central-1")).applicationVersion()); } @Test @Test public void eachDeployTargetIsTested() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .parallel("eu-west-1", "us-east-3") .build(); tester.deployCompletely(application, applicationPackage); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.upgradeSystem(v2); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deploymentTrigger().cancelChange(application.id(), true); tester.deploy(productionEuWest1, application, applicationPackage); assertEquals(v2, app.get().deployments().get(productionEuWest1.zone(main).get()).version()); assertEquals(v1, app.get().deployments().get(productionUsEast3.zone(main).get()).version()); tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); Version firstTested = app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform(); assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform()); assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); 
tester.deployAndNotify(application, applicationPackage, false, productionEuWest1); tester.deployAndNotify(application, applicationPackage, false, productionUsEast3); tester.triggerUntilQuiescence(); assertNotEquals(firstTested, app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform()); assertNotEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, false, productionEuWest1); tester.deployAndNotify(application, false, productionUsEast3); tester.deployAndNotify(application, true, productionUsEast3); tester.deployAndNotify(application, true, productionEuWest1); assertFalse(app.get().change().isPresent()); assertEquals(43, app.get().deploymentJobs().jobStatus().get(productionEuWest1).lastSuccess().get().application().buildNumber().get().longValue()); assertEquals(43, app.get().deploymentJobs().jobStatus().get(productionUsEast3).lastSuccess().get().application().buildNumber().get().longValue()); } @Test public void eachDifferentUpgradeCombinationIsTested() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("eu-west-1", "us-east-3") .build(); tester.deployCompletely(application, applicationPackage); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.upgradeSystem(v2); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, true, productionUsCentral1); tester.deployAndNotify(application, true, productionEuWest1); tester.deployAndNotify(application, 
false, productionUsEast3); assertEquals(v2, app.get().deployments().get(ZoneId.from("prod", "us-central-1")).version()); assertEquals(v2, app.get().deployments().get(ZoneId.from("prod", "eu-west-1")).version()); assertEquals(v1, app.get().deployments().get(ZoneId.from("prod", "us-east-3")).version()); Version v3 = new Version("6.3"); tester.upgradeSystem(v3); tester.deployAndNotify(application, false, productionUsEast3); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); assertEquals(v2, app.get().deploymentJobs().jobStatus().get(stagingTest).lastSuccess().get().sourcePlatform().get()); tester.deployAndNotify(application, true, productionUsCentral1); assertEquals(v1, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().sourcePlatform().get()); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, true, productionEuWest1); tester.deployAndNotify(application, true, productionUsEast3); } @Test public void retriesFailingJobs() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .build(); tester.deployCompletely(application, applicationPackage); tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, false, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.clock().advance(Duration.ofSeconds(59)); tester.jobCompletion(systemTest).application(application).unsuccessful().submit(); tester.readyJobTrigger().maintain(); tester.assertRunning(systemTest, application.id()); tester.clock().advance(Duration.ofSeconds(1)); tester.jobCompletion(systemTest).application(application).unsuccessful().submit(); 
tester.readyJobTrigger().maintain(); tester.assertNotRunning(systemTest, application.id()); tester.clock().advance(Duration.ofMinutes(10).plus(Duration.ofSeconds(1))); tester.readyJobTrigger().maintain(); tester.assertRunning(systemTest, application.id()); tester.clock().advance(Duration.ofMinutes(50)); tester.jobCompletion(systemTest).application(application).unsuccessful().submit(); tester.readyJobTrigger().maintain(); tester.assertNotRunning(systemTest, application.id()); tester.clock().advance(Duration.ofHours(2).plus(Duration.ofSeconds(1))); tester.readyJobTrigger().maintain(); tester.assertRunning(systemTest, application.id()); tester.jobCompletion(systemTest).application(application).unsuccessful().submit(); tester.readyJobTrigger().maintain(); tester.assertNotRunning(systemTest, application.id()); tester.jobCompletion(component).application(application).nextBuildNumber(2).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, true, productionUsCentral1); assertTrue("Deployment completed", tester.buildService().jobs().isEmpty()); } }
Yes, it's not pretty, that's why I added a method for it. I think the above is better as your suggestion creates an unnecessary object.
public boolean isOutOfCapacity() { return jobError.filter(error -> error == DeploymentJobs.JobError.outOfCapacity).isPresent(); }
return jobError.filter(error -> error == DeploymentJobs.JobError.outOfCapacity).isPresent();
public boolean isOutOfCapacity() { return jobError.filter(error -> error == DeploymentJobs.JobError.outOfCapacity).isPresent(); }
class JobStatus { private final DeploymentJobs.JobType type; private final Optional<JobRun> lastTriggered; private final Optional<JobRun> lastCompleted; private final Optional<JobRun> firstFailing; private final Optional<JobRun> lastSuccess; private final Optional<DeploymentJobs.JobError> jobError; /** * Used by the persistence layer (only) to create a complete JobStatus instance. * Other creation should be by using initial- and with- methods. */ public JobStatus(DeploymentJobs.JobType type, Optional<DeploymentJobs.JobError> jobError, Optional<JobRun> lastTriggered, Optional<JobRun> lastCompleted, Optional<JobRun> firstFailing, Optional<JobRun> lastSuccess) { requireNonNull(type, "jobType cannot be null"); requireNonNull(jobError, "jobError cannot be null"); requireNonNull(lastTriggered, "lastTriggered cannot be null"); requireNonNull(lastCompleted, "lastCompleted cannot be null"); requireNonNull(firstFailing, "firstFailing cannot be null"); requireNonNull(lastSuccess, "lastSuccess cannot be null"); this.type = type; this.jobError = jobError; this.lastTriggered = type == DeploymentJobs.JobType.component ? 
Optional.empty() : lastTriggered; this.lastCompleted = lastCompleted; this.firstFailing = firstFailing; this.lastSuccess = lastSuccess; } /** Returns an empty job status */ public static JobStatus initial(DeploymentJobs.JobType type) { return new JobStatus(type, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty()); } public JobStatus withTriggering(Version platform, ApplicationVersion application, Optional<Deployment> deployment, String reason, Instant triggeredAt) { return withTriggering(JobRun.triggering(platform, application, deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion), reason, triggeredAt)); } public JobStatus withTriggering(JobRun jobRun) { return new JobStatus(type, jobError, Optional.of(jobRun), lastCompleted, firstFailing, lastSuccess); } public JobStatus withCompletion(long runId, Optional<DeploymentJobs.JobError> jobError, Instant completion) { return withCompletion(lastTriggered.get().completion(runId, completion), jobError); } public JobStatus withCompletion(JobRun completion, Optional<DeploymentJobs.JobError> jobError) { Optional<JobRun> firstFailing = this.firstFailing; if (jobError.isPresent() && ! this.firstFailing.isPresent()) firstFailing = Optional.of(completion); Optional<JobRun> lastSuccess = this.lastSuccess; if ( ! jobError.isPresent()) { lastSuccess = Optional.of(completion); firstFailing = Optional.empty(); } return new JobStatus(type, jobError, lastTriggered, Optional.of(completion), firstFailing, lastSuccess); } public DeploymentJobs.JobType type() { return type; } /** Returns true unless this job last completed with a failure */ public boolean isSuccess() { return lastCompleted().isPresent() && ! 
jobError.isPresent(); } /** The error of the last completion, or empty if the last run succeeded */ public Optional<DeploymentJobs.JobError> jobError() { return jobError; } /** Returns whether this last failed on out of capacity */ /** * Returns the last triggering of this job, or empty if the controller has never triggered it * and not seen a deployment for it */ public Optional<JobRun> lastTriggered() { return lastTriggered; } /** Returns the last completion of this job (whether failing or succeeding), or empty if it never completed */ public Optional<JobRun> lastCompleted() { return lastCompleted; } /** Returns the run when this started failing, or empty if it is not currently failing */ public Optional<JobRun> firstFailing() { return firstFailing; } /** Returns the run when this last succeeded, or empty if it has never succeeded */ public Optional<JobRun> lastSuccess() { return lastSuccess; } @Override public String toString() { return "job status of " + type + "[ " + "last triggered: " + lastTriggered.map(JobRun::toString).orElse("(never)") + ", last completed: " + lastCompleted.map(JobRun::toString).orElse("(never)") + ", first failing: " + firstFailing.map(JobRun::toString).orElse("(not failing)") + ", lastSuccess: " + lastSuccess.map(JobRun::toString).orElse("(never)") + "]"; } @Override public int hashCode() { return Objects.hash(type, jobError, lastTriggered, lastCompleted, firstFailing, lastSuccess); } @Override public boolean equals(Object o) { if (o == this) return true; if ( ! ( o instanceof JobStatus)) return false; JobStatus other = (JobStatus)o; return Objects.equals(type, other.type) && Objects.equals(jobError, other.jobError) && Objects.equals(lastTriggered, other.lastTriggered) && Objects.equals(lastCompleted, other.lastCompleted) && Objects.equals(firstFailing, other.firstFailing) && Objects.equals(lastSuccess, other.lastSuccess); } /** Information about a particular triggering or completion of a run of a job. This is immutable. 
*/ public static class JobRun { private final long id; private final Version platform; private final ApplicationVersion application; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private final String reason; private final Instant at; public JobRun(long id, Version platform, ApplicationVersion application, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication, String reason, Instant at) { this.id = id; this.platform = requireNonNull(platform); this.application = requireNonNull(application); this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; this.reason = requireNonNull(reason); this.at = requireNonNull(at); } public static JobRun triggering(Version platform, ApplicationVersion application, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication, String reason, Instant at) { return new JobRun(-1, platform, application, sourcePlatform, sourceApplication, reason, at); } public JobRun completion(long id, Instant at) { return new JobRun(id, platform, application, sourcePlatform, sourceApplication, reason, at); } /** Returns the id of this run of this job, or -1 if not known */ public long id() { return id; } /** Returns the Vespa version used on this run */ public Version platform() { return platform; } /** Returns the Vespa version this run upgraded from, if already deployed */ public Optional<Version> sourcePlatform() { return sourcePlatform; } /** Returns the application version used in this run */ public ApplicationVersion application() { return application; } /** Returns the application version this run upgraded from, if already deployed */ public Optional<ApplicationVersion> sourceApplication() { return sourceApplication; } /** Returns a human-readable reason for this particular job run */ public String reason() { return reason; } /** Returns the time if this triggering or completion */ public Instant at() { return at; } 
@Override public String toString() { return "job run " + id + " of version " + platform + (sourcePlatform.map(version -> " (" + version + ")").orElse("")) + " " + application.id() + (sourceApplication.map(version -> " (" + version.id() + ")").orElse("")) + " at " + at; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof JobRun)) return false; JobRun run = (JobRun) o; if (id != run.id) return false; if (!platform.equals(run.platform)) return false; if (!application.equals(run.application)) return false; if (!sourcePlatform.equals(run.sourcePlatform)) return false; if (!sourceApplication.equals(run.sourceApplication)) return false; return at.equals(run.at); } @Override public int hashCode() { int result = (int) (id ^ (id >>> 32)); result = 31 * result + platform.hashCode(); result = 31 * result + application.hashCode(); result = 31 * result + sourcePlatform.hashCode(); result = 31 * result + sourceApplication.hashCode(); result = 31 * result + at.hashCode(); return result; } } }
class JobStatus { private final DeploymentJobs.JobType type; private final Optional<JobRun> lastTriggered; private final Optional<JobRun> lastCompleted; private final Optional<JobRun> firstFailing; private final Optional<JobRun> lastSuccess; private final Optional<DeploymentJobs.JobError> jobError; /** * Used by the persistence layer (only) to create a complete JobStatus instance. * Other creation should be by using initial- and with- methods. */ public JobStatus(DeploymentJobs.JobType type, Optional<DeploymentJobs.JobError> jobError, Optional<JobRun> lastTriggered, Optional<JobRun> lastCompleted, Optional<JobRun> firstFailing, Optional<JobRun> lastSuccess) { requireNonNull(type, "jobType cannot be null"); requireNonNull(jobError, "jobError cannot be null"); requireNonNull(lastTriggered, "lastTriggered cannot be null"); requireNonNull(lastCompleted, "lastCompleted cannot be null"); requireNonNull(firstFailing, "firstFailing cannot be null"); requireNonNull(lastSuccess, "lastSuccess cannot be null"); this.type = type; this.jobError = jobError; this.lastTriggered = type == DeploymentJobs.JobType.component ? 
Optional.empty() : lastTriggered; this.lastCompleted = lastCompleted; this.firstFailing = firstFailing; this.lastSuccess = lastSuccess; } /** Returns an empty job status */ public static JobStatus initial(DeploymentJobs.JobType type) { return new JobStatus(type, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty()); } public JobStatus withTriggering(Version platform, ApplicationVersion application, Optional<Deployment> deployment, String reason, Instant triggeredAt) { return withTriggering(JobRun.triggering(platform, application, deployment.map(Deployment::version), deployment.map(Deployment::applicationVersion), reason, triggeredAt)); } public JobStatus withTriggering(JobRun jobRun) { return new JobStatus(type, jobError, Optional.of(jobRun), lastCompleted, firstFailing, lastSuccess); } public JobStatus withCompletion(long runId, Optional<DeploymentJobs.JobError> jobError, Instant completion) { return withCompletion(lastTriggered.get().completion(runId, completion), jobError); } public JobStatus withCompletion(JobRun completion, Optional<DeploymentJobs.JobError> jobError) { Optional<JobRun> firstFailing = this.firstFailing; if (jobError.isPresent() && ! this.firstFailing.isPresent()) firstFailing = Optional.of(completion); Optional<JobRun> lastSuccess = this.lastSuccess; if ( ! jobError.isPresent()) { lastSuccess = Optional.of(completion); firstFailing = Optional.empty(); } return new JobStatus(type, jobError, lastTriggered, Optional.of(completion), firstFailing, lastSuccess); } public DeploymentJobs.JobType type() { return type; } /** Returns true unless this job last completed with a failure */ public boolean isSuccess() { return lastCompleted().isPresent() && ! 
jobError.isPresent(); } /** The error of the last completion, or empty if the last run succeeded */ public Optional<DeploymentJobs.JobError> jobError() { return jobError; } /** Returns whether this last failed on out of capacity */ /** * Returns the last triggering of this job, or empty if the controller has never triggered it * and not seen a deployment for it */ public Optional<JobRun> lastTriggered() { return lastTriggered; } /** Returns the last completion of this job (whether failing or succeeding), or empty if it never completed */ public Optional<JobRun> lastCompleted() { return lastCompleted; } /** Returns the run when this started failing, or empty if it is not currently failing */ public Optional<JobRun> firstFailing() { return firstFailing; } /** Returns the run when this last succeeded, or empty if it has never succeeded */ public Optional<JobRun> lastSuccess() { return lastSuccess; } @Override public String toString() { return "job status of " + type + "[ " + "last triggered: " + lastTriggered.map(JobRun::toString).orElse("(never)") + ", last completed: " + lastCompleted.map(JobRun::toString).orElse("(never)") + ", first failing: " + firstFailing.map(JobRun::toString).orElse("(not failing)") + ", lastSuccess: " + lastSuccess.map(JobRun::toString).orElse("(never)") + "]"; } @Override public int hashCode() { return Objects.hash(type, jobError, lastTriggered, lastCompleted, firstFailing, lastSuccess); } @Override public boolean equals(Object o) { if (o == this) return true; if ( ! ( o instanceof JobStatus)) return false; JobStatus other = (JobStatus)o; return Objects.equals(type, other.type) && Objects.equals(jobError, other.jobError) && Objects.equals(lastTriggered, other.lastTriggered) && Objects.equals(lastCompleted, other.lastCompleted) && Objects.equals(firstFailing, other.firstFailing) && Objects.equals(lastSuccess, other.lastSuccess); } /** Information about a particular triggering or completion of a run of a job. This is immutable. 
*/ public static class JobRun { private final long id; private final Version platform; private final ApplicationVersion application; private final Optional<Version> sourcePlatform; private final Optional<ApplicationVersion> sourceApplication; private final String reason; private final Instant at; public JobRun(long id, Version platform, ApplicationVersion application, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication, String reason, Instant at) { this.id = id; this.platform = requireNonNull(platform); this.application = requireNonNull(application); this.sourcePlatform = sourcePlatform; this.sourceApplication = sourceApplication; this.reason = requireNonNull(reason); this.at = requireNonNull(at); } public static JobRun triggering(Version platform, ApplicationVersion application, Optional<Version> sourcePlatform, Optional<ApplicationVersion> sourceApplication, String reason, Instant at) { return new JobRun(-1, platform, application, sourcePlatform, sourceApplication, reason, at); } public JobRun completion(long id, Instant at) { return new JobRun(id, platform, application, sourcePlatform, sourceApplication, reason, at); } /** Returns the id of this run of this job, or -1 if not known */ public long id() { return id; } /** Returns the Vespa version used on this run */ public Version platform() { return platform; } /** Returns the Vespa version this run upgraded from, if already deployed */ public Optional<Version> sourcePlatform() { return sourcePlatform; } /** Returns the application version used in this run */ public ApplicationVersion application() { return application; } /** Returns the application version this run upgraded from, if already deployed */ public Optional<ApplicationVersion> sourceApplication() { return sourceApplication; } /** Returns a human-readable reason for this particular job run */ public String reason() { return reason; } /** Returns the time if this triggering or completion */ public Instant at() { return at; } 
@Override public String toString() { return "job run " + id + " of version " + platform + (sourcePlatform.map(version -> " (" + version + ")").orElse("")) + " " + application.id() + (sourceApplication.map(version -> " (" + version.id() + ")").orElse("")) + " at " + at; } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof JobRun)) return false; JobRun run = (JobRun) o; if (id != run.id) return false; if (!platform.equals(run.platform)) return false; if (!application.equals(run.application)) return false; if (!sourcePlatform.equals(run.sourcePlatform)) return false; if (!sourceApplication.equals(run.sourceApplication)) return false; return at.equals(run.at); } @Override public int hashCode() { int result = (int) (id ^ (id >>> 32)); result = 31 * result + platform.hashCode(); result = 31 * result + application.hashCode(); result = 31 * result + sourcePlatform.hashCode(); result = 31 * result + sourceApplication.hashCode(); result = 31 * result + at.hashCode(); return result; } } }
Sure.
public void stepIsCompletePreciselyWhenItShouldBe() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .region("eu-west-1") .upgradePolicy("canary") .build(); tester.deployCompletely(application, applicationPackage); Version v2 = new Version("7.2"); tester.upgradeSystem(v2); tester.completeUpgradeWithError(application, v2, applicationPackage, productionUsCentral1); tester.deploy(productionUsCentral1, application, applicationPackage); tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1); assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version()); tester.deploymentTrigger().cancelChange(application.id(), false); tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1); Instant triggered = app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at(); tester.clock().advance(Duration.ofHours(1)); Version v1 = new Version("7.1"); tester.upgradeSystem(v1); assertEquals(Change.of(v1), app.get().change()); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); assertEquals(triggered, app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at()); tester.deployAndNotify(application, applicationPackage, false, productionEuWest1); tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, false, productionEuWest1); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, 
applicationPackage, true, stagingTest); tester.clock().advance(Duration.ofHours(1).plus(Duration.ofSeconds(1))); tester.readyJobTrigger().maintain(); assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version()); assertEquals(Long.valueOf(42L), app.get().deployments().get(productionUsCentral1.zone(main).get()).applicationVersion().buildNumber().get()); assertNotEquals(triggered, app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at()); tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1); tester.deploy(productionUsCentral1, application, applicationPackage); assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version()); assertEquals(Long.valueOf(43), app.get().deployments().get(productionUsCentral1.zone(main).get()).applicationVersion().buildNumber().get()); tester.clock().advance(Duration.ofHours(2).plus(Duration.ofSeconds(1))); tester.readyJobTrigger().maintain(); tester.assertNotRunning(productionUsCentral1, application.id()); tester.deployAndNotify(application, empty(), true, systemTest); tester.deployAndNotify(application, empty(), true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, productionEuWest1); assertFalse(app.get().change().isPresent()); assertFalse(app.get().deploymentJobs().jobStatus().get(productionUsCentral1).isSuccess()); }
tester.clock().advance(Duration.ofHours(1).plus(Duration.ofSeconds(1)));
public void stepIsCompletePreciselyWhenItShouldBe() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .region("eu-west-1") .upgradePolicy("canary") .build(); tester.deployCompletely(application, applicationPackage); Version v2 = new Version("7.2"); tester.upgradeSystem(v2); tester.completeUpgradeWithError(application, v2, applicationPackage, productionUsCentral1); tester.deploy(productionUsCentral1, application, applicationPackage); tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1); assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version()); tester.deploymentTrigger().cancelChange(application.id(), false); tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1); Instant triggered = app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at(); tester.clock().advance(Duration.ofHours(1)); Version v1 = new Version("7.1"); tester.upgradeSystem(v1); assertEquals(Change.of(v1), app.get().change()); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); assertEquals(triggered, app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at()); tester.deployAndNotify(application, applicationPackage, false, productionEuWest1); tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, false, productionEuWest1); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, 
applicationPackage, true, stagingTest); tester.assertRunning(productionUsCentral1, application.id()); assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version()); assertEquals(Long.valueOf(42L), app.get().deployments().get(productionUsCentral1.zone(main).get()).applicationVersion().buildNumber().get()); assertNotEquals(triggered, app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at()); tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1); tester.deploy(productionUsCentral1, application, applicationPackage); assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version()); assertEquals(Long.valueOf(43), app.get().deployments().get(productionUsCentral1.zone(main).get()).applicationVersion().buildNumber().get()); tester.clock().advance(Duration.ofHours(2).plus(Duration.ofSeconds(1))); tester.readyJobTrigger().maintain(); tester.assertNotRunning(productionUsCentral1, application.id()); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, productionEuWest1); assertFalse(app.get().change().isPresent()); assertFalse(app.get().deploymentJobs().jobStatus().get(productionUsCentral1).isSuccess()); }
class DeploymentTriggerTest { @Test public void testTriggerFailing() { DeploymentTester tester = new DeploymentTester(); Application app = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .environment(Environment.prod) .region("us-west-1") .build(); Version version = new Version(5, 1); tester.upgradeSystem(version); tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1); version = new Version(5, 2); tester.upgradeSystem(version); tester.buildService().remove(buildJob(app, stagingTest)); tester.readyJobTrigger().maintain(); assertEquals("Retried dead job", 2, tester.buildService().jobs().size()); tester.assertRunning(stagingTest, app.id()); tester.deployAndNotify(app, applicationPackage, true, stagingTest); tester.assertRunning(systemTest, app.id()); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest); assertEquals("Job is retried on failure", 1, tester.buildService().jobs().size()); tester.deployAndNotify(app, applicationPackage, true, JobType.systemTest); tester.assertRunning(productionUsWest1, app.id()); } @Test public void deploymentSpecDecidesTriggerOrder() { DeploymentTester tester = new DeploymentTester(); TenantName tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); MockBuildService mockBuildService = tester.buildService(); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") 
.region("us-central-1") .region("us-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, applicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void deploymentsSpecWithDelays() { DeploymentTester tester = new DeploymentTester(); MockBuildService mockBuildService = tester.buildService(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(2)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(0, mockBuildService.jobs().size()); tester.clock().advance(Duration.ofSeconds(30)); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(1, mockBuildService.jobs().size()); tester.assertRunning(productionUsWest1, application.id()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(1, mockBuildService.jobs().size()); tester.assertRunning(productionUsWest1, application.id()); tester.deployAndNotify(application, 
applicationPackage, true, productionUsWest1); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("No more jobs triggered at this time", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("No more jobs triggered at this time", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(1)); tester.deploymentTrigger().triggerReadyJobs(); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertEquals(2, tester.buildService().jobs().size()); tester.assertRunning(productionUsEast3, application.id()); tester.assertRunning(productionUsWest1, application.id()); tester.deploy(JobType.productionUsWest1, application, applicationPackage, false); tester.jobCompletion(JobType.productionUsWest1).application(application).submit(); assertEquals("One job still running.", JobType.productionUsEast3.jobName(), 
tester.buildService().jobs().get(0).jobName()); tester.deploy(JobType.productionUsEast3, application, applicationPackage, false); tester.jobCompletion(JobType.productionUsEast3).application(application).submit(); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty()); } @Test public void parallelDeploymentCompletesOutOfOrder() { DeploymentTester tester = new DeploymentTester(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .parallel("us-east-3", "us-west-1") .build(); Application app = tester.createApplication("app1", "tenant1", 1, 11L); tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deploy(DeploymentJobs.JobType.productionUsWest1, app, applicationPackage); tester.jobCompletion(DeploymentJobs.JobType.productionUsWest1).application(app).submit(); assertTrue("Change is present as not all jobs are complete", tester.applications().require(app.id()).change().isPresent()); tester.deploy(DeploymentJobs.JobType.productionUsEast3, app, applicationPackage); tester.jobCompletion(JobType.productionUsEast3).application(app).submit(); assertFalse("Change has been deployed", tester.applications().require(app.id()).change().isPresent()); } @Test public void testSuccessfulDeploymentApplicationPackageChanged() { DeploymentTester tester = new DeploymentTester(); TenantName tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); MockBuildService mockBuildService = tester.buildService(); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage 
previousApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); ApplicationPackage newApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .region("eu-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(newApplicationPackage).submit(); tester.deployAndNotify(application, newApplicationPackage, true, JobType.systemTest); tester.deploy(JobType.stagingTest, application, previousApplicationPackage, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsWest1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void testBlockRevisionChange() { ManualClock clock = new ManualClock(Instant.parse("2017-09-26T17:30:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); ReadyJobsTrigger readyJobsTrigger = new ReadyJobsTrigger(tester.controller(), Duration.ofHours(1), new JobControl(tester.controllerTester().curator())); Version version = Version.fromString("5.0"); tester.upgradeSystem(version); ApplicationPackageBuilder applicationPackageBuilder = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(true, false, "tue", "18-19", "UTC") .region("us-west-1") .region("us-central-1") .region("us-east-3"); Application app = tester.createAndDeploy("app1", 1, applicationPackageBuilder.build()); tester.clock().advance(Duration.ofHours(1)); 
readyJobsTrigger.run(); assertEquals(0, tester.buildService().jobs().size()); String searchDefinition = "search test {\n" + " document test {\n" + " field test type string {\n" + " }\n" + " }\n" + "}\n"; ApplicationPackage changedApplication = applicationPackageBuilder.searchDefinition(searchDefinition).build(); tester.jobCompletion(component) .application(app) .nextBuildNumber() .sourceRevision(new SourceRevision("repository1", "master", "cafed00d")) .uploadArtifact(changedApplication) .submit(); assertTrue(tester.applications().require(app.id()).change().isPresent()); tester.deployAndNotify(app, changedApplication, true, systemTest); tester.deployAndNotify(app, changedApplication, true, stagingTest); readyJobsTrigger.run(); assertEquals(0, tester.buildService().jobs().size()); tester.clock().advance(Duration.ofHours(2)); tester.deploymentTrigger().triggerReadyJobs(); tester.deployAndNotify(app, changedApplication, true, stagingTest); assertEquals(singletonList(buildJob(app, productionUsWest1)), tester.buildService().jobs()); } @Test public void testCompletionOfPartOfChangeDuringBlockWindow() { ManualClock clock = new ManualClock(Instant.parse("2017-09-26T17:30:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .blockChange(false, true, "tue", "18", "UTC") .region("us-west-1") .region("us-east-3") .build(); Application application = tester.createAndDeploy("app1", 1, applicationPackage); Version v1 = Version.fromString("6.1"); Version v2 = Version.fromString("6.2"); tester.upgradeSystem(v2); tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, systemTest); clock.advance(Duration.ofHours(1)); tester.deployAndNotify(application, applicationPackage, true, 
productionUsWest1); assertEquals((Long) BuildJob.defaultBuildNumber, tester.application(application.id()).deploymentJobs().jobStatus() .get(productionUsWest1).lastSuccess().get().application().buildNumber().get()); assertEquals((Long) (BuildJob.defaultBuildNumber + 1), tester.application(application.id()).outstandingChange().application().get().buildNumber().get()); tester.readyJobTrigger().maintain(); assertTrue(tester.buildService().jobs().isEmpty()); tester.jobCompletion(component).application(application).nextBuildNumber().nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, productionUsWest1); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, productionUsEast3); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); assertTrue(tester.buildService().jobs().isEmpty()); assertEquals(Change.of(v2), tester.application(application.id()).change()); clock.advance(Duration.ofHours(1)); tester.readyJobTrigger().maintain(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, productionUsEast3); assertFalse(tester.application(application.id()).change().isPresent()); assertFalse(tester.application(application.id()).outstandingChange().isPresent()); } @Test public void testUpgradingButNoJobStarted() { DeploymentTester tester = new DeploymentTester(); ReadyJobsTrigger readyJobsTrigger = new ReadyJobsTrigger(tester.controller(), Duration.ofHours(1), new JobControl(tester.controllerTester().curator())); Application app 
= tester.createAndDeploy("default0", 3, "default"); tester.controller().applications().lockOrThrow(app.id(), locked -> { tester.controller().applications().store(locked.withChange(Change.of(Version.fromString("6.2")))); }); assertEquals(0, tester.buildService().jobs().size()); readyJobsTrigger.run(); tester.assertRunning(systemTest, app.id()); tester.assertRunning(stagingTest, app.id()); } @Test public void applicationVersionIsNotDowngraded() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .region("eu-west-1") .build(); tester.deployCompletely(application, applicationPackage); tester.completeDeploymentWithError(application, applicationPackage, BuildJob.defaultBuildNumber + 1, productionUsCentral1); tester.deployAndNotify(application, empty(), false, productionUsCentral1); tester.deploy(productionUsCentral1, application, empty(), false); ApplicationVersion appVersion1 = ApplicationVersion.from(BuildJob.defaultSourceRevision, BuildJob.defaultBuildNumber + 1); assertEquals(appVersion1, app.get().deployments().get(ZoneId.from("prod.us-central-1")).applicationVersion()); tester.deploymentTrigger().cancelChange(application.id(), true); assertEquals(Change.of(appVersion1), app.get().change()); tester.deploymentTrigger().cancelChange(application.id(), false); assertEquals(Change.empty(), app.get().change()); Version version1 = new Version("6.2"); tester.upgradeSystem(version1); tester.jobCompletion(productionUsCentral1).application(application).unsuccessful().submit(); tester.deployAndNotify(application, empty(), true, systemTest); tester.deployAndNotify(application, empty(), true, stagingTest); tester.deployAndNotify(application, empty(), false, productionUsCentral1); 
tester.deployAndNotify(application, empty(), true, systemTest); tester.deployAndNotify(application, empty(), true, stagingTest); tester.deployAndNotify(application, empty(), true, productionUsCentral1); tester.deployAndNotify(application, empty(), true, productionEuWest1); assertEquals(appVersion1, app.get().deployments().get(ZoneId.from("prod.us-central-1")).applicationVersion()); } @Test @Test public void eachDeployTargetIsTested() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .parallel("eu-west-1", "us-east-3") .build(); tester.deployCompletely(application, applicationPackage); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.upgradeSystem(v2); tester.deployAndNotify(application, empty(), true, systemTest); tester.deployAndNotify(application, empty(), true, stagingTest); tester.deploymentTrigger().cancelChange(application.id(), true); tester.deploy(productionEuWest1, application, applicationPackage); assertEquals(v2, app.get().deployments().get(productionEuWest1.zone(main).get()).version()); assertEquals(v1, app.get().deployments().get(productionUsEast3.zone(main).get()).version()); tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); Version firstTested = app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform(); assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); tester.deployAndNotify(application, empty(), true, systemTest); tester.deployAndNotify(application, empty(), true, stagingTest); assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform()); 
assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); tester.deployAndNotify(application, applicationPackage, false, productionEuWest1); tester.deployAndNotify(application, applicationPackage, false, productionUsEast3); tester.triggerUntilQuiescence(); assertNotEquals(firstTested, app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform()); assertNotEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); tester.deployAndNotify(application, empty(), true, systemTest); tester.deployAndNotify(application, empty(), true, stagingTest); tester.deployAndNotify(application, empty(), false, productionEuWest1); tester.deployAndNotify(application, empty(), false, productionUsEast3); tester.deployAndNotify(application, empty(), true, productionUsEast3); tester.deployAndNotify(application, empty(), true, productionEuWest1); assertFalse(app.get().change().isPresent()); assertEquals(43, app.get().deploymentJobs().jobStatus().get(productionEuWest1).lastSuccess().get().application().buildNumber().get().longValue()); assertEquals(43, app.get().deploymentJobs().jobStatus().get(productionUsEast3).lastSuccess().get().application().buildNumber().get().longValue()); } @Test public void eachDifferentUpgradeCombinationIsTested() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("eu-west-1", "us-east-3") .build(); tester.deployCompletely(application, applicationPackage); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.upgradeSystem(v2); tester.deployAndNotify(application, empty(), true, systemTest); tester.deployAndNotify(application, 
empty(), true, stagingTest); tester.deployAndNotify(application, empty(), true, productionUsCentral1); tester.deployAndNotify(application, empty(), true, productionEuWest1); tester.deployAndNotify(application, empty(), false, productionUsEast3); assertEquals(v2, app.get().deployments().get(ZoneId.from("prod", "us-central-1")).version()); assertEquals(v2, app.get().deployments().get(ZoneId.from("prod", "eu-west-1")).version()); assertEquals(v1, app.get().deployments().get(ZoneId.from("prod", "us-east-3")).version()); Version v3 = new Version("6.3"); tester.upgradeSystem(v3); tester.deployAndNotify(application, empty(), false, productionUsEast3); tester.deployAndNotify(application, empty(), true, systemTest); tester.deployAndNotify(application, empty(), true, stagingTest); assertEquals(v2, app.get().deploymentJobs().jobStatus().get(stagingTest).lastSuccess().get().sourcePlatform().get()); tester.deployAndNotify(application, empty(), true, productionUsCentral1); assertEquals(v1, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().sourcePlatform().get()); tester.deployAndNotify(application, empty(), true, stagingTest); tester.deployAndNotify(application, empty(), true, productionEuWest1); tester.deployAndNotify(application, empty(), true, productionUsEast3); } }
class DeploymentTriggerTest { @Test public void testTriggerFailing() { DeploymentTester tester = new DeploymentTester(); Application app = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .environment(Environment.prod) .region("us-west-1") .build(); Version version = new Version(5, 1); tester.upgradeSystem(version); tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1); version = new Version(5, 2); tester.upgradeSystem(version); tester.buildService().remove(buildJob(app, stagingTest)); tester.readyJobTrigger().maintain(); assertEquals("Retried dead job", 2, tester.buildService().jobs().size()); tester.assertRunning(stagingTest, app.id()); tester.deployAndNotify(app, applicationPackage, true, stagingTest); tester.assertRunning(systemTest, app.id()); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest); assertEquals("Job is retried on failure", 1, tester.buildService().jobs().size()); tester.deployAndNotify(app, applicationPackage, true, JobType.systemTest); tester.assertRunning(productionUsWest1, app.id()); } @Test public void deploymentSpecDecidesTriggerOrder() { DeploymentTester tester = new DeploymentTester(); TenantName tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); MockBuildService mockBuildService = tester.buildService(); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") 
.region("us-central-1") .region("us-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, applicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void deploymentsSpecWithDelays() { DeploymentTester tester = new DeploymentTester(); MockBuildService mockBuildService = tester.buildService(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(2)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(0, mockBuildService.jobs().size()); tester.clock().advance(Duration.ofSeconds(30)); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(1, mockBuildService.jobs().size()); tester.assertRunning(productionUsWest1, application.id()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(1, mockBuildService.jobs().size()); tester.assertRunning(productionUsWest1, application.id()); tester.deployAndNotify(application, 
applicationPackage, true, productionUsWest1); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("No more jobs triggered at this time", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("No more jobs triggered at this time", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(1)); tester.deploymentTrigger().triggerReadyJobs(); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertEquals(2, tester.buildService().jobs().size()); tester.assertRunning(productionUsEast3, application.id()); tester.assertRunning(productionUsWest1, application.id()); tester.deploy(JobType.productionUsWest1, application, applicationPackage, false); tester.jobCompletion(JobType.productionUsWest1).application(application).submit(); assertEquals("One job still running.", JobType.productionUsEast3.jobName(), 
tester.buildService().jobs().get(0).jobName()); tester.deploy(JobType.productionUsEast3, application, applicationPackage, false); tester.jobCompletion(JobType.productionUsEast3).application(application).submit(); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty()); } @Test public void parallelDeploymentCompletesOutOfOrder() { DeploymentTester tester = new DeploymentTester(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .parallel("us-east-3", "us-west-1") .build(); Application app = tester.createApplication("app1", "tenant1", 1, 11L); tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deploy(DeploymentJobs.JobType.productionUsWest1, app, applicationPackage); tester.jobCompletion(DeploymentJobs.JobType.productionUsWest1).application(app).submit(); assertTrue("Change is present as not all jobs are complete", tester.applications().require(app.id()).change().isPresent()); tester.deploy(DeploymentJobs.JobType.productionUsEast3, app, applicationPackage); tester.jobCompletion(JobType.productionUsEast3).application(app).submit(); assertFalse("Change has been deployed", tester.applications().require(app.id()).change().isPresent()); } @Test public void testSuccessfulDeploymentApplicationPackageChanged() { DeploymentTester tester = new DeploymentTester(); TenantName tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); MockBuildService mockBuildService = tester.buildService(); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage 
previousApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); ApplicationPackage newApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .region("eu-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(newApplicationPackage).submit(); tester.deployAndNotify(application, newApplicationPackage, true, JobType.systemTest); tester.deploy(JobType.stagingTest, application, previousApplicationPackage, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsWest1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void testBlockRevisionChange() { ManualClock clock = new ManualClock(Instant.parse("2017-09-26T17:30:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); ReadyJobsTrigger readyJobsTrigger = new ReadyJobsTrigger(tester.controller(), Duration.ofHours(1), new JobControl(tester.controllerTester().curator())); Version version = Version.fromString("5.0"); tester.upgradeSystem(version); ApplicationPackageBuilder applicationPackageBuilder = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(true, false, "tue", "18-19", "UTC") .region("us-west-1") .region("us-central-1") .region("us-east-3"); Application app = tester.createAndDeploy("app1", 1, applicationPackageBuilder.build()); tester.clock().advance(Duration.ofHours(1)); 
readyJobsTrigger.run(); assertEquals(0, tester.buildService().jobs().size()); String searchDefinition = "search test {\n" + " document test {\n" + " field test type string {\n" + " }\n" + " }\n" + "}\n"; ApplicationPackage changedApplication = applicationPackageBuilder.searchDefinition(searchDefinition).build(); tester.jobCompletion(component) .application(app) .nextBuildNumber() .sourceRevision(new SourceRevision("repository1", "master", "cafed00d")) .uploadArtifact(changedApplication) .submit(); assertTrue(tester.applications().require(app.id()).change().isPresent()); tester.deployAndNotify(app, changedApplication, true, systemTest); tester.deployAndNotify(app, changedApplication, true, stagingTest); readyJobsTrigger.run(); assertEquals(0, tester.buildService().jobs().size()); tester.clock().advance(Duration.ofHours(2)); tester.deploymentTrigger().triggerReadyJobs(); tester.deployAndNotify(app, changedApplication, true, stagingTest); assertEquals(singletonList(buildJob(app, productionUsWest1)), tester.buildService().jobs()); } @Test public void testCompletionOfPartOfChangeDuringBlockWindow() { ManualClock clock = new ManualClock(Instant.parse("2017-09-26T17:30:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .blockChange(false, true, "tue", "18", "UTC") .region("us-west-1") .region("us-east-3") .build(); Application application = tester.createAndDeploy("app1", 1, applicationPackage); Version v1 = Version.fromString("6.1"); Version v2 = Version.fromString("6.2"); tester.upgradeSystem(v2); tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, systemTest); clock.advance(Duration.ofHours(1)); tester.deployAndNotify(application, applicationPackage, true, 
productionUsWest1); assertEquals((Long) BuildJob.defaultBuildNumber, tester.application(application.id()).deploymentJobs().jobStatus() .get(productionUsWest1).lastSuccess().get().application().buildNumber().get()); assertEquals((Long) (BuildJob.defaultBuildNumber + 1), tester.application(application.id()).outstandingChange().application().get().buildNumber().get()); tester.readyJobTrigger().maintain(); assertTrue(tester.buildService().jobs().isEmpty()); tester.jobCompletion(component).application(application).nextBuildNumber().nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, productionUsWest1); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, productionUsEast3); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); assertTrue(tester.buildService().jobs().isEmpty()); assertEquals(Change.of(v2), tester.application(application.id()).change()); clock.advance(Duration.ofHours(1)); tester.readyJobTrigger().maintain(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, productionUsEast3); assertFalse(tester.application(application.id()).change().isPresent()); assertFalse(tester.application(application.id()).outstandingChange().isPresent()); } @Test public void testUpgradingButNoJobStarted() { DeploymentTester tester = new DeploymentTester(); ReadyJobsTrigger readyJobsTrigger = new ReadyJobsTrigger(tester.controller(), Duration.ofHours(1), new JobControl(tester.controllerTester().curator())); Application app 
= tester.createAndDeploy("default0", 3, "default"); tester.controller().applications().lockOrThrow(app.id(), locked -> { tester.controller().applications().store(locked.withChange(Change.of(Version.fromString("6.2")))); }); assertEquals(0, tester.buildService().jobs().size()); readyJobsTrigger.run(); tester.assertRunning(systemTest, app.id()); tester.assertRunning(stagingTest, app.id()); } @Test public void applicationVersionIsNotDowngraded() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .region("eu-west-1") .build(); tester.deployCompletely(application, applicationPackage); tester.completeDeploymentWithError(application, applicationPackage, BuildJob.defaultBuildNumber + 1, productionUsCentral1); tester.deployAndNotify(application, false, productionUsCentral1); tester.deploy(productionUsCentral1, application, Optional.empty(), false); ApplicationVersion appVersion1 = ApplicationVersion.from(BuildJob.defaultSourceRevision, BuildJob.defaultBuildNumber + 1); assertEquals(appVersion1, app.get().deployments().get(ZoneId.from("prod.us-central-1")).applicationVersion()); tester.deploymentTrigger().cancelChange(application.id(), true); assertEquals(Change.of(appVersion1), app.get().change()); tester.deploymentTrigger().cancelChange(application.id(), false); assertEquals(Change.empty(), app.get().change()); Version version1 = new Version("6.2"); tester.upgradeSystem(version1); tester.jobCompletion(productionUsCentral1).application(application).unsuccessful().submit(); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, false, productionUsCentral1); tester.deployAndNotify(application, true, systemTest); 
tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, true, productionUsCentral1); tester.deployAndNotify(application, true, productionEuWest1); assertEquals(appVersion1, app.get().deployments().get(ZoneId.from("prod.us-central-1")).applicationVersion()); } @Test @Test public void eachDeployTargetIsTested() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .parallel("eu-west-1", "us-east-3") .build(); tester.deployCompletely(application, applicationPackage); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.upgradeSystem(v2); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deploymentTrigger().cancelChange(application.id(), true); tester.deploy(productionEuWest1, application, applicationPackage); assertEquals(v2, app.get().deployments().get(productionEuWest1.zone(main).get()).version()); assertEquals(v1, app.get().deployments().get(productionUsEast3.zone(main).get()).version()); tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); Version firstTested = app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform(); assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform()); assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); 
tester.deployAndNotify(application, applicationPackage, false, productionEuWest1); tester.deployAndNotify(application, applicationPackage, false, productionUsEast3); tester.triggerUntilQuiescence(); assertNotEquals(firstTested, app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform()); assertNotEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, false, productionEuWest1); tester.deployAndNotify(application, false, productionUsEast3); tester.deployAndNotify(application, true, productionUsEast3); tester.deployAndNotify(application, true, productionEuWest1); assertFalse(app.get().change().isPresent()); assertEquals(43, app.get().deploymentJobs().jobStatus().get(productionEuWest1).lastSuccess().get().application().buildNumber().get().longValue()); assertEquals(43, app.get().deploymentJobs().jobStatus().get(productionUsEast3).lastSuccess().get().application().buildNumber().get().longValue()); } @Test public void eachDifferentUpgradeCombinationIsTested() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("eu-west-1", "us-east-3") .build(); tester.deployCompletely(application, applicationPackage); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.upgradeSystem(v2); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, true, productionUsCentral1); tester.deployAndNotify(application, true, productionEuWest1); tester.deployAndNotify(application, 
false, productionUsEast3); assertEquals(v2, app.get().deployments().get(ZoneId.from("prod", "us-central-1")).version()); assertEquals(v2, app.get().deployments().get(ZoneId.from("prod", "eu-west-1")).version()); assertEquals(v1, app.get().deployments().get(ZoneId.from("prod", "us-east-3")).version()); Version v3 = new Version("6.3"); tester.upgradeSystem(v3); tester.deployAndNotify(application, false, productionUsEast3); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); assertEquals(v2, app.get().deploymentJobs().jobStatus().get(stagingTest).lastSuccess().get().sourcePlatform().get()); tester.deployAndNotify(application, true, productionUsCentral1); assertEquals(v1, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().sourcePlatform().get()); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, true, productionEuWest1); tester.deployAndNotify(application, true, productionUsEast3); } @Test public void retriesFailingJobs() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .build(); tester.deployCompletely(application, applicationPackage); tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, false, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.clock().advance(Duration.ofSeconds(59)); tester.jobCompletion(systemTest).application(application).unsuccessful().submit(); tester.readyJobTrigger().maintain(); tester.assertRunning(systemTest, application.id()); tester.clock().advance(Duration.ofSeconds(1)); tester.jobCompletion(systemTest).application(application).unsuccessful().submit(); 
tester.readyJobTrigger().maintain(); tester.assertNotRunning(systemTest, application.id()); tester.clock().advance(Duration.ofMinutes(10).plus(Duration.ofSeconds(1))); tester.readyJobTrigger().maintain(); tester.assertRunning(systemTest, application.id()); tester.clock().advance(Duration.ofMinutes(50)); tester.jobCompletion(systemTest).application(application).unsuccessful().submit(); tester.readyJobTrigger().maintain(); tester.assertNotRunning(systemTest, application.id()); tester.clock().advance(Duration.ofHours(2).plus(Duration.ofSeconds(1))); tester.readyJobTrigger().maintain(); tester.assertRunning(systemTest, application.id()); tester.jobCompletion(systemTest).application(application).unsuccessful().submit(); tester.readyJobTrigger().maintain(); tester.assertNotRunning(systemTest, application.id()); tester.jobCompletion(component).application(application).nextBuildNumber(2).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, true, productionUsCentral1); assertTrue("Deployment completed", tester.buildService().jobs().isEmpty()); } }
// Duplicated fragment of retriesFailingJobs (retry back-off for failing jobs) follows.
/**
 * Verifies retry behavior for a persistently failing system test: immediate retry within the
 * first minute, then ~10-minute back-off, then ~2-hour back-off, until a fresh build resets
 * the failure history and the deployment completes.
 */
public void retriesFailingJobs() {
    DeploymentTester tester = new DeploymentTester();
    Application app = tester.createApplication("app1", "tenant1", 1, 1L);
    ApplicationPackage pkg = new ApplicationPackageBuilder()
            .environment(Environment.prod)
            .region("us-central-1")
            .build();
    tester.deployCompletely(app, pkg);
    tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(pkg).submit();
    tester.deployAndNotify(app, false, systemTest);
    tester.deployAndNotify(app, true, stagingTest);

    // Less than a minute of failure: retried right away.
    tester.clock().advance(Duration.ofSeconds(59));
    tester.jobCompletion(systemTest).application(app).unsuccessful().submit();
    tester.readyJobTrigger().maintain();
    tester.assertRunning(systemTest, app.id());

    // Past one minute: not retried until ~10 minutes have elapsed.
    tester.clock().advance(Duration.ofSeconds(1));
    tester.jobCompletion(systemTest).application(app).unsuccessful().submit();
    tester.readyJobTrigger().maintain();
    tester.assertNotRunning(systemTest, app.id());
    tester.clock().advance(Duration.ofMinutes(10).plus(Duration.ofSeconds(1)));
    tester.readyJobTrigger().maintain();
    tester.assertRunning(systemTest, app.id());

    // Past one hour: not retried until ~2 hours have elapsed.
    tester.clock().advance(Duration.ofMinutes(50));
    tester.jobCompletion(systemTest).application(app).unsuccessful().submit();
    tester.readyJobTrigger().maintain();
    tester.assertNotRunning(systemTest, app.id());
    tester.clock().advance(Duration.ofHours(2).plus(Duration.ofSeconds(1)));
    tester.readyJobTrigger().maintain();
    tester.assertRunning(systemTest, app.id());
    tester.jobCompletion(systemTest).application(app).unsuccessful().submit();
    tester.readyJobTrigger().maintain();
    tester.assertNotRunning(systemTest, app.id());

    // A new build resets the back-off and the change rolls out.
    tester.jobCompletion(component).application(app).nextBuildNumber(2).uploadArtifact(pkg).submit();
    tester.deployAndNotify(app, true, systemTest);
    tester.deployAndNotify(app, true, stagingTest);
    tester.deployAndNotify(app, true, productionUsCentral1);
    assertTrue("Deployment completed", tester.buildService().jobs().isEmpty());
}
// NOTE(review): stray statement duplicated from the end of retriesFailingJobs —
// `tester` is not in scope here; presumably a copy/paste artifact. Verify and remove.
assertTrue("Deployment completed", tester.buildService().jobs().isEmpty());
/**
 * Checks that a failing system test is retried with a growing back-off (immediate within the
 * first minute, then roughly 10 minutes, then roughly 2 hours) and that submitting a new
 * component build lets the deployment complete.
 */
public void retriesFailingJobs() {
    DeploymentTester deployer = new DeploymentTester();
    Application application = deployer.createApplication("app1", "tenant1", 1, 1L);
    ApplicationPackage appPkg = new ApplicationPackageBuilder()
            .environment(Environment.prod)
            .region("us-central-1")
            .build();
    deployer.deployCompletely(application, appPkg);
    deployer.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(appPkg).submit();
    deployer.deployAndNotify(application, false, systemTest);
    deployer.deployAndNotify(application, true, stagingTest);

    // Failure younger than a minute is retried immediately.
    deployer.clock().advance(Duration.ofSeconds(59));
    deployer.jobCompletion(systemTest).application(application).unsuccessful().submit();
    deployer.readyJobTrigger().maintain();
    deployer.assertRunning(systemTest, application.id());

    // Once failing for a minute, the retry waits ~10 minutes.
    deployer.clock().advance(Duration.ofSeconds(1));
    deployer.jobCompletion(systemTest).application(application).unsuccessful().submit();
    deployer.readyJobTrigger().maintain();
    deployer.assertNotRunning(systemTest, application.id());
    deployer.clock().advance(Duration.ofMinutes(10).plus(Duration.ofSeconds(1)));
    deployer.readyJobTrigger().maintain();
    deployer.assertRunning(systemTest, application.id());

    // Once failing for an hour, the retry waits ~2 hours.
    deployer.clock().advance(Duration.ofMinutes(50));
    deployer.jobCompletion(systemTest).application(application).unsuccessful().submit();
    deployer.readyJobTrigger().maintain();
    deployer.assertNotRunning(systemTest, application.id());
    deployer.clock().advance(Duration.ofHours(2).plus(Duration.ofSeconds(1)));
    deployer.readyJobTrigger().maintain();
    deployer.assertRunning(systemTest, application.id());
    deployer.jobCompletion(systemTest).application(application).unsuccessful().submit();
    deployer.readyJobTrigger().maintain();
    deployer.assertNotRunning(systemTest, application.id());

    // A fresh build clears the failure history; the change then completes.
    deployer.jobCompletion(component).application(application).nextBuildNumber(2).uploadArtifact(appPkg).submit();
    deployer.deployAndNotify(application, true, systemTest);
    deployer.deployAndNotify(application, true, stagingTest);
    deployer.deployAndNotify(application, true, productionUsCentral1);
    assertTrue("Deployment completed", deployer.buildService().jobs().isEmpty());
}
class DeploymentTriggerTest { @Test public void testTriggerFailing() { DeploymentTester tester = new DeploymentTester(); Application app = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .environment(Environment.prod) .region("us-west-1") .build(); Version version = new Version(5, 1); tester.upgradeSystem(version); tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1); version = new Version(5, 2); tester.upgradeSystem(version); tester.buildService().remove(buildJob(app, stagingTest)); tester.readyJobTrigger().maintain(); assertEquals("Retried dead job", 2, tester.buildService().jobs().size()); tester.assertRunning(stagingTest, app.id()); tester.deployAndNotify(app, applicationPackage, true, stagingTest); tester.assertRunning(systemTest, app.id()); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest); assertEquals("Job is retried on failure", 1, tester.buildService().jobs().size()); tester.deployAndNotify(app, applicationPackage, true, JobType.systemTest); tester.assertRunning(productionUsWest1, app.id()); } @Test public void deploymentSpecDecidesTriggerOrder() { DeploymentTester tester = new DeploymentTester(); TenantName tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); MockBuildService mockBuildService = tester.buildService(); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") 
.region("us-central-1") .region("us-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, applicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void deploymentsSpecWithDelays() { DeploymentTester tester = new DeploymentTester(); MockBuildService mockBuildService = tester.buildService(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(2)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(0, mockBuildService.jobs().size()); tester.clock().advance(Duration.ofSeconds(30)); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(1, mockBuildService.jobs().size()); tester.assertRunning(productionUsWest1, application.id()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(1, mockBuildService.jobs().size()); tester.assertRunning(productionUsWest1, application.id()); tester.deployAndNotify(application, 
applicationPackage, true, productionUsWest1); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("No more jobs triggered at this time", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("No more jobs triggered at this time", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(1)); tester.deploymentTrigger().triggerReadyJobs(); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertEquals(2, tester.buildService().jobs().size()); tester.assertRunning(productionUsEast3, application.id()); tester.assertRunning(productionUsWest1, application.id()); tester.deploy(JobType.productionUsWest1, application, applicationPackage, false); tester.jobCompletion(JobType.productionUsWest1).application(application).submit(); assertEquals("One job still running.", JobType.productionUsEast3.jobName(), 
tester.buildService().jobs().get(0).jobName()); tester.deploy(JobType.productionUsEast3, application, applicationPackage, false); tester.jobCompletion(JobType.productionUsEast3).application(application).submit(); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty()); } @Test public void parallelDeploymentCompletesOutOfOrder() { DeploymentTester tester = new DeploymentTester(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .parallel("us-east-3", "us-west-1") .build(); Application app = tester.createApplication("app1", "tenant1", 1, 11L); tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deploy(DeploymentJobs.JobType.productionUsWest1, app, applicationPackage); tester.jobCompletion(DeploymentJobs.JobType.productionUsWest1).application(app).submit(); assertTrue("Change is present as not all jobs are complete", tester.applications().require(app.id()).change().isPresent()); tester.deploy(DeploymentJobs.JobType.productionUsEast3, app, applicationPackage); tester.jobCompletion(JobType.productionUsEast3).application(app).submit(); assertFalse("Change has been deployed", tester.applications().require(app.id()).change().isPresent()); } @Test public void testSuccessfulDeploymentApplicationPackageChanged() { DeploymentTester tester = new DeploymentTester(); TenantName tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); MockBuildService mockBuildService = tester.buildService(); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage 
previousApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); ApplicationPackage newApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .region("eu-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(newApplicationPackage).submit(); tester.deployAndNotify(application, newApplicationPackage, true, JobType.systemTest); tester.deploy(JobType.stagingTest, application, previousApplicationPackage, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsWest1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void testBlockRevisionChange() { ManualClock clock = new ManualClock(Instant.parse("2017-09-26T17:30:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); ReadyJobsTrigger readyJobsTrigger = new ReadyJobsTrigger(tester.controller(), Duration.ofHours(1), new JobControl(tester.controllerTester().curator())); Version version = Version.fromString("5.0"); tester.upgradeSystem(version); ApplicationPackageBuilder applicationPackageBuilder = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(true, false, "tue", "18-19", "UTC") .region("us-west-1") .region("us-central-1") .region("us-east-3"); Application app = tester.createAndDeploy("app1", 1, applicationPackageBuilder.build()); tester.clock().advance(Duration.ofHours(1)); 
readyJobsTrigger.run(); assertEquals(0, tester.buildService().jobs().size()); String searchDefinition = "search test {\n" + " document test {\n" + " field test type string {\n" + " }\n" + " }\n" + "}\n"; ApplicationPackage changedApplication = applicationPackageBuilder.searchDefinition(searchDefinition).build(); tester.jobCompletion(component) .application(app) .nextBuildNumber() .sourceRevision(new SourceRevision("repository1", "master", "cafed00d")) .uploadArtifact(changedApplication) .submit(); assertTrue(tester.applications().require(app.id()).change().isPresent()); tester.deployAndNotify(app, changedApplication, true, systemTest); tester.deployAndNotify(app, changedApplication, true, stagingTest); readyJobsTrigger.run(); assertEquals(0, tester.buildService().jobs().size()); tester.clock().advance(Duration.ofHours(2)); tester.deploymentTrigger().triggerReadyJobs(); tester.deployAndNotify(app, changedApplication, true, stagingTest); assertEquals(singletonList(buildJob(app, productionUsWest1)), tester.buildService().jobs()); } @Test public void testCompletionOfPartOfChangeDuringBlockWindow() { ManualClock clock = new ManualClock(Instant.parse("2017-09-26T17:30:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .blockChange(false, true, "tue", "18", "UTC") .region("us-west-1") .region("us-east-3") .build(); Application application = tester.createAndDeploy("app1", 1, applicationPackage); Version v1 = Version.fromString("6.1"); Version v2 = Version.fromString("6.2"); tester.upgradeSystem(v2); tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, systemTest); clock.advance(Duration.ofHours(1)); tester.deployAndNotify(application, applicationPackage, true, 
productionUsWest1); assertEquals((Long) BuildJob.defaultBuildNumber, tester.application(application.id()).deploymentJobs().jobStatus() .get(productionUsWest1).lastSuccess().get().application().buildNumber().get()); assertEquals((Long) (BuildJob.defaultBuildNumber + 1), tester.application(application.id()).outstandingChange().application().get().buildNumber().get()); tester.readyJobTrigger().maintain(); assertTrue(tester.buildService().jobs().isEmpty()); tester.jobCompletion(component).application(application).nextBuildNumber().nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, productionUsWest1); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, productionUsEast3); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); assertTrue(tester.buildService().jobs().isEmpty()); assertEquals(Change.of(v2), tester.application(application.id()).change()); clock.advance(Duration.ofHours(1)); tester.readyJobTrigger().maintain(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, productionUsEast3); assertFalse(tester.application(application.id()).change().isPresent()); assertFalse(tester.application(application.id()).outstandingChange().isPresent()); } @Test public void testUpgradingButNoJobStarted() { DeploymentTester tester = new DeploymentTester(); ReadyJobsTrigger readyJobsTrigger = new ReadyJobsTrigger(tester.controller(), Duration.ofHours(1), new JobControl(tester.controllerTester().curator())); Application app 
= tester.createAndDeploy("default0", 3, "default"); tester.controller().applications().lockOrThrow(app.id(), locked -> { tester.controller().applications().store(locked.withChange(Change.of(Version.fromString("6.2")))); }); assertEquals(0, tester.buildService().jobs().size()); readyJobsTrigger.run(); tester.assertRunning(systemTest, app.id()); tester.assertRunning(stagingTest, app.id()); } @Test public void applicationVersionIsNotDowngraded() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .region("eu-west-1") .build(); tester.deployCompletely(application, applicationPackage); tester.completeDeploymentWithError(application, applicationPackage, BuildJob.defaultBuildNumber + 1, productionUsCentral1); tester.deployAndNotify(application, false, productionUsCentral1); tester.deploy(productionUsCentral1, application, Optional.empty(), false); ApplicationVersion appVersion1 = ApplicationVersion.from(BuildJob.defaultSourceRevision, BuildJob.defaultBuildNumber + 1); assertEquals(appVersion1, app.get().deployments().get(ZoneId.from("prod.us-central-1")).applicationVersion()); tester.deploymentTrigger().cancelChange(application.id(), true); assertEquals(Change.of(appVersion1), app.get().change()); tester.deploymentTrigger().cancelChange(application.id(), false); assertEquals(Change.empty(), app.get().change()); Version version1 = new Version("6.2"); tester.upgradeSystem(version1); tester.jobCompletion(productionUsCentral1).application(application).unsuccessful().submit(); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, false, productionUsCentral1); tester.deployAndNotify(application, true, systemTest); 
tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, true, productionUsCentral1); tester.deployAndNotify(application, true, productionEuWest1); assertEquals(appVersion1, app.get().deployments().get(ZoneId.from("prod.us-central-1")).applicationVersion()); } @Test public void stepIsCompletePreciselyWhenItShouldBe() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .region("eu-west-1") .upgradePolicy("canary") .build(); tester.deployCompletely(application, applicationPackage); Version v2 = new Version("7.2"); tester.upgradeSystem(v2); tester.completeUpgradeWithError(application, v2, applicationPackage, productionUsCentral1); tester.deploy(productionUsCentral1, application, applicationPackage); tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1); assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version()); tester.deploymentTrigger().cancelChange(application.id(), false); tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1); Instant triggered = app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at(); tester.clock().advance(Duration.ofHours(1)); Version v1 = new Version("7.1"); tester.upgradeSystem(v1); assertEquals(Change.of(v1), app.get().change()); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); assertEquals(triggered, app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at()); tester.deployAndNotify(application, applicationPackage, false, productionEuWest1); 
tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, false, productionEuWest1); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.assertRunning(productionUsCentral1, application.id()); assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version()); assertEquals(Long.valueOf(42L), app.get().deployments().get(productionUsCentral1.zone(main).get()).applicationVersion().buildNumber().get()); assertNotEquals(triggered, app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at()); tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1); tester.deploy(productionUsCentral1, application, applicationPackage); assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version()); assertEquals(Long.valueOf(43), app.get().deployments().get(productionUsCentral1.zone(main).get()).applicationVersion().buildNumber().get()); tester.clock().advance(Duration.ofHours(2).plus(Duration.ofSeconds(1))); tester.readyJobTrigger().maintain(); tester.assertNotRunning(productionUsCentral1, application.id()); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, productionEuWest1); assertFalse(app.get().change().isPresent()); assertFalse(app.get().deploymentJobs().jobStatus().get(productionUsCentral1).isSuccess()); } @Test public void eachDeployTargetIsTested() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() 
.environment(Environment.prod) .parallel("eu-west-1", "us-east-3") .build(); tester.deployCompletely(application, applicationPackage); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.upgradeSystem(v2); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deploymentTrigger().cancelChange(application.id(), true); tester.deploy(productionEuWest1, application, applicationPackage); assertEquals(v2, app.get().deployments().get(productionEuWest1.zone(main).get()).version()); assertEquals(v1, app.get().deployments().get(productionUsEast3.zone(main).get()).version()); tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); Version firstTested = app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform(); assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform()); assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); tester.deployAndNotify(application, applicationPackage, false, productionEuWest1); tester.deployAndNotify(application, applicationPackage, false, productionUsEast3); tester.triggerUntilQuiescence(); assertNotEquals(firstTested, app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform()); assertNotEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, false, productionEuWest1); tester.deployAndNotify(application, 
false, productionUsEast3); tester.deployAndNotify(application, true, productionUsEast3); tester.deployAndNotify(application, true, productionEuWest1); assertFalse(app.get().change().isPresent()); assertEquals(43, app.get().deploymentJobs().jobStatus().get(productionEuWest1).lastSuccess().get().application().buildNumber().get().longValue()); assertEquals(43, app.get().deploymentJobs().jobStatus().get(productionUsEast3).lastSuccess().get().application().buildNumber().get().longValue()); } @Test public void eachDifferentUpgradeCombinationIsTested() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("eu-west-1", "us-east-3") .build(); tester.deployCompletely(application, applicationPackage); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.upgradeSystem(v2); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, true, productionUsCentral1); tester.deployAndNotify(application, true, productionEuWest1); tester.deployAndNotify(application, false, productionUsEast3); assertEquals(v2, app.get().deployments().get(ZoneId.from("prod", "us-central-1")).version()); assertEquals(v2, app.get().deployments().get(ZoneId.from("prod", "eu-west-1")).version()); assertEquals(v1, app.get().deployments().get(ZoneId.from("prod", "us-east-3")).version()); Version v3 = new Version("6.3"); tester.upgradeSystem(v3); tester.deployAndNotify(application, false, productionUsEast3); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); assertEquals(v2, 
app.get().deploymentJobs().jobStatus().get(stagingTest).lastSuccess().get().sourcePlatform().get()); tester.deployAndNotify(application, true, productionUsCentral1); assertEquals(v1, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().sourcePlatform().get()); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, true, productionEuWest1); tester.deployAndNotify(application, true, productionUsEast3); } @Test }
class DeploymentTriggerTest { @Test public void testTriggerFailing() { DeploymentTester tester = new DeploymentTester(); Application app = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .environment(Environment.prod) .region("us-west-1") .build(); Version version = new Version(5, 1); tester.upgradeSystem(version); tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deployAndNotify(app, applicationPackage, true, JobType.productionUsWest1); version = new Version(5, 2); tester.upgradeSystem(version); tester.buildService().remove(buildJob(app, stagingTest)); tester.readyJobTrigger().maintain(); assertEquals("Retried dead job", 2, tester.buildService().jobs().size()); tester.assertRunning(stagingTest, app.id()); tester.deployAndNotify(app, applicationPackage, true, stagingTest); tester.assertRunning(systemTest, app.id()); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest); assertEquals("Job is retried on failure", 1, tester.buildService().jobs().size()); tester.deployAndNotify(app, applicationPackage, true, JobType.systemTest); tester.assertRunning(productionUsWest1, app.id()); } @Test public void deploymentSpecDecidesTriggerOrder() { DeploymentTester tester = new DeploymentTester(); TenantName tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); MockBuildService mockBuildService = tester.buildService(); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") 
.region("us-central-1") .region("us-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, applicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsWest1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void deploymentsSpecWithDelays() { DeploymentTester tester = new DeploymentTester(); MockBuildService mockBuildService = tester.buildService(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(2)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(0, mockBuildService.jobs().size()); tester.clock().advance(Duration.ofSeconds(30)); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(1, mockBuildService.jobs().size()); tester.assertRunning(productionUsWest1, application.id()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerReadyJobs(); assertEquals(1, mockBuildService.jobs().size()); tester.assertRunning(productionUsWest1, application.id()); tester.deployAndNotify(application, 
applicationPackage, true, productionUsWest1); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("No more jobs triggered at this time", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("No more jobs triggered at this time", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(1)); tester.deploymentTrigger().triggerReadyJobs(); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.deploymentTrigger().triggerReadyJobs(); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, JobType.systemTest); tester.deployAndNotify(application, applicationPackage, true, JobType.stagingTest); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionUsCentral1); assertEquals(2, tester.buildService().jobs().size()); tester.assertRunning(productionUsEast3, application.id()); tester.assertRunning(productionUsWest1, application.id()); tester.deploy(JobType.productionUsWest1, application, applicationPackage, false); tester.jobCompletion(JobType.productionUsWest1).application(application).submit(); assertEquals("One job still running.", JobType.productionUsEast3.jobName(), 
tester.buildService().jobs().get(0).jobName()); tester.deploy(JobType.productionUsEast3, application, applicationPackage, false); tester.jobCompletion(JobType.productionUsEast3).application(application).submit(); assertEquals(1, tester.buildService().jobs().size()); tester.deployAndNotify(application, applicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty()); } @Test public void parallelDeploymentCompletesOutOfOrder() { DeploymentTester tester = new DeploymentTester(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .parallel("us-east-3", "us-west-1") .build(); Application app = tester.createApplication("app1", "tenant1", 1, 11L); tester.jobCompletion(component).application(app).uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.systemTest); tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.stagingTest); tester.deploy(DeploymentJobs.JobType.productionUsWest1, app, applicationPackage); tester.jobCompletion(DeploymentJobs.JobType.productionUsWest1).application(app).submit(); assertTrue("Change is present as not all jobs are complete", tester.applications().require(app.id()).change().isPresent()); tester.deploy(DeploymentJobs.JobType.productionUsEast3, app, applicationPackage); tester.jobCompletion(JobType.productionUsEast3).application(app).submit(); assertFalse("Change has been deployed", tester.applications().require(app.id()).change().isPresent()); } @Test public void testSuccessfulDeploymentApplicationPackageChanged() { DeploymentTester tester = new DeploymentTester(); TenantName tenant = tester.controllerTester().createTenant("tenant1", "domain1", 1L); MockBuildService mockBuildService = tester.buildService(); Application application = tester.controllerTester().createApplication(tenant, "app1", "default", 1L); ApplicationPackage 
previousApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .build(); ApplicationPackage newApplicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("corp-us-east-1") .region("us-central-1") .region("us-west-1") .region("eu-west-1") .build(); tester.jobCompletion(component).application(application).uploadArtifact(newApplicationPackage).submit(); tester.deployAndNotify(application, newApplicationPackage, true, JobType.systemTest); tester.deploy(JobType.stagingTest, application, previousApplicationPackage, true); tester.deployAndNotify(application, newApplicationPackage, true, JobType.stagingTest); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionCorpUsEast1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsCentral1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionUsWest1); tester.deployAndNotify(application, newApplicationPackage, true, JobType.productionEuWest1); assertTrue("All jobs consumed", mockBuildService.jobs().isEmpty()); } @Test public void testBlockRevisionChange() { ManualClock clock = new ManualClock(Instant.parse("2017-09-26T17:30:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); ReadyJobsTrigger readyJobsTrigger = new ReadyJobsTrigger(tester.controller(), Duration.ofHours(1), new JobControl(tester.controllerTester().curator())); Version version = Version.fromString("5.0"); tester.upgradeSystem(version); ApplicationPackageBuilder applicationPackageBuilder = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(true, false, "tue", "18-19", "UTC") .region("us-west-1") .region("us-central-1") .region("us-east-3"); Application app = tester.createAndDeploy("app1", 1, applicationPackageBuilder.build()); tester.clock().advance(Duration.ofHours(1)); 
readyJobsTrigger.run(); assertEquals(0, tester.buildService().jobs().size()); String searchDefinition = "search test {\n" + " document test {\n" + " field test type string {\n" + " }\n" + " }\n" + "}\n"; ApplicationPackage changedApplication = applicationPackageBuilder.searchDefinition(searchDefinition).build(); tester.jobCompletion(component) .application(app) .nextBuildNumber() .sourceRevision(new SourceRevision("repository1", "master", "cafed00d")) .uploadArtifact(changedApplication) .submit(); assertTrue(tester.applications().require(app.id()).change().isPresent()); tester.deployAndNotify(app, changedApplication, true, systemTest); tester.deployAndNotify(app, changedApplication, true, stagingTest); readyJobsTrigger.run(); assertEquals(0, tester.buildService().jobs().size()); tester.clock().advance(Duration.ofHours(2)); tester.deploymentTrigger().triggerReadyJobs(); tester.deployAndNotify(app, changedApplication, true, stagingTest); assertEquals(singletonList(buildJob(app, productionUsWest1)), tester.buildService().jobs()); } @Test public void testCompletionOfPartOfChangeDuringBlockWindow() { ManualClock clock = new ManualClock(Instant.parse("2017-09-26T17:30:00.00Z")); DeploymentTester tester = new DeploymentTester(new ControllerTester(clock)); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .blockChange(false, true, "tue", "18", "UTC") .region("us-west-1") .region("us-east-3") .build(); Application application = tester.createAndDeploy("app1", 1, applicationPackage); Version v1 = Version.fromString("6.1"); Version v2 = Version.fromString("6.2"); tester.upgradeSystem(v2); tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, systemTest); clock.advance(Duration.ofHours(1)); tester.deployAndNotify(application, applicationPackage, true, 
productionUsWest1); assertEquals((Long) BuildJob.defaultBuildNumber, tester.application(application.id()).deploymentJobs().jobStatus() .get(productionUsWest1).lastSuccess().get().application().buildNumber().get()); assertEquals((Long) (BuildJob.defaultBuildNumber + 1), tester.application(application.id()).outstandingChange().application().get().buildNumber().get()); tester.readyJobTrigger().maintain(); assertTrue(tester.buildService().jobs().isEmpty()); tester.jobCompletion(component).application(application).nextBuildNumber().nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, productionUsWest1); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, productionUsEast3); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); assertTrue(tester.buildService().jobs().isEmpty()); assertEquals(Change.of(v2), tester.application(application.id()).change()); clock.advance(Duration.ofHours(1)); tester.readyJobTrigger().maintain(); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, productionUsEast3); assertFalse(tester.application(application.id()).change().isPresent()); assertFalse(tester.application(application.id()).outstandingChange().isPresent()); } @Test public void testUpgradingButNoJobStarted() { DeploymentTester tester = new DeploymentTester(); ReadyJobsTrigger readyJobsTrigger = new ReadyJobsTrigger(tester.controller(), Duration.ofHours(1), new JobControl(tester.controllerTester().curator())); Application app 
= tester.createAndDeploy("default0", 3, "default"); tester.controller().applications().lockOrThrow(app.id(), locked -> { tester.controller().applications().store(locked.withChange(Change.of(Version.fromString("6.2")))); }); assertEquals(0, tester.buildService().jobs().size()); readyJobsTrigger.run(); tester.assertRunning(systemTest, app.id()); tester.assertRunning(stagingTest, app.id()); } @Test public void applicationVersionIsNotDowngraded() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .region("eu-west-1") .build(); tester.deployCompletely(application, applicationPackage); tester.completeDeploymentWithError(application, applicationPackage, BuildJob.defaultBuildNumber + 1, productionUsCentral1); tester.deployAndNotify(application, false, productionUsCentral1); tester.deploy(productionUsCentral1, application, Optional.empty(), false); ApplicationVersion appVersion1 = ApplicationVersion.from(BuildJob.defaultSourceRevision, BuildJob.defaultBuildNumber + 1); assertEquals(appVersion1, app.get().deployments().get(ZoneId.from("prod.us-central-1")).applicationVersion()); tester.deploymentTrigger().cancelChange(application.id(), true); assertEquals(Change.of(appVersion1), app.get().change()); tester.deploymentTrigger().cancelChange(application.id(), false); assertEquals(Change.empty(), app.get().change()); Version version1 = new Version("6.2"); tester.upgradeSystem(version1); tester.jobCompletion(productionUsCentral1).application(application).unsuccessful().submit(); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, false, productionUsCentral1); tester.deployAndNotify(application, true, systemTest); 
tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, true, productionUsCentral1); tester.deployAndNotify(application, true, productionEuWest1); assertEquals(appVersion1, app.get().deployments().get(ZoneId.from("prod.us-central-1")).applicationVersion()); } @Test public void stepIsCompletePreciselyWhenItShouldBe() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .region("eu-west-1") .upgradePolicy("canary") .build(); tester.deployCompletely(application, applicationPackage); Version v2 = new Version("7.2"); tester.upgradeSystem(v2); tester.completeUpgradeWithError(application, v2, applicationPackage, productionUsCentral1); tester.deploy(productionUsCentral1, application, applicationPackage); tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1); assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version()); tester.deploymentTrigger().cancelChange(application.id(), false); tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1); Instant triggered = app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at(); tester.clock().advance(Duration.ofHours(1)); Version v1 = new Version("7.1"); tester.upgradeSystem(v1); assertEquals(Change.of(v1), app.get().change()); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); assertEquals(triggered, app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at()); tester.deployAndNotify(application, applicationPackage, false, productionEuWest1); 
tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); tester.deployAndNotify(application, applicationPackage, false, productionEuWest1); tester.deployAndNotify(application, applicationPackage, true, systemTest); tester.deployAndNotify(application, applicationPackage, true, stagingTest); tester.assertRunning(productionUsCentral1, application.id()); assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version()); assertEquals(Long.valueOf(42L), app.get().deployments().get(productionUsCentral1.zone(main).get()).applicationVersion().buildNumber().get()); assertNotEquals(triggered, app.get().deploymentJobs().jobStatus().get(productionUsCentral1).lastTriggered().get().at()); tester.deployAndNotify(application, applicationPackage, false, productionUsCentral1); tester.deploy(productionUsCentral1, application, applicationPackage); assertEquals(v2, app.get().deployments().get(productionUsCentral1.zone(main).get()).version()); assertEquals(Long.valueOf(43), app.get().deployments().get(productionUsCentral1.zone(main).get()).applicationVersion().buildNumber().get()); tester.clock().advance(Duration.ofHours(2).plus(Duration.ofSeconds(1))); tester.readyJobTrigger().maintain(); tester.assertNotRunning(productionUsCentral1, application.id()); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, applicationPackage, true, productionEuWest1); assertFalse(app.get().change().isPresent()); assertFalse(app.get().deploymentJobs().jobStatus().get(productionUsCentral1).isSuccess()); } @Test public void eachDeployTargetIsTested() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() 
.environment(Environment.prod) .parallel("eu-west-1", "us-east-3") .build(); tester.deployCompletely(application, applicationPackage); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.upgradeSystem(v2); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deploymentTrigger().cancelChange(application.id(), true); tester.deploy(productionEuWest1, application, applicationPackage); assertEquals(v2, app.get().deployments().get(productionEuWest1.zone(main).get()).version()); assertEquals(v1, app.get().deployments().get(productionUsEast3.zone(main).get()).version()); tester.jobCompletion(component).application(application).nextBuildNumber().uploadArtifact(applicationPackage).submit(); Version firstTested = app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform(); assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform()); assertEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); tester.deployAndNotify(application, applicationPackage, false, productionEuWest1); tester.deployAndNotify(application, applicationPackage, false, productionUsEast3); tester.triggerUntilQuiescence(); assertNotEquals(firstTested, app.get().deploymentJobs().jobStatus().get(systemTest).lastTriggered().get().platform()); assertNotEquals(firstTested, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().platform()); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, false, productionEuWest1); tester.deployAndNotify(application, 
false, productionUsEast3); tester.deployAndNotify(application, true, productionUsEast3); tester.deployAndNotify(application, true, productionEuWest1); assertFalse(app.get().change().isPresent()); assertEquals(43, app.get().deploymentJobs().jobStatus().get(productionEuWest1).lastSuccess().get().application().buildNumber().get().longValue()); assertEquals(43, app.get().deploymentJobs().jobStatus().get(productionUsEast3).lastSuccess().get().application().buildNumber().get().longValue()); } @Test public void eachDifferentUpgradeCombinationIsTested() { DeploymentTester tester = new DeploymentTester(); Application application = tester.createApplication("app1", "tenant1", 1, 1L); Supplier<Application> app = () -> tester.application(application.id()); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .environment(Environment.prod) .region("us-central-1") .parallel("eu-west-1", "us-east-3") .build(); tester.deployCompletely(application, applicationPackage); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.upgradeSystem(v2); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, true, productionUsCentral1); tester.deployAndNotify(application, true, productionEuWest1); tester.deployAndNotify(application, false, productionUsEast3); assertEquals(v2, app.get().deployments().get(ZoneId.from("prod", "us-central-1")).version()); assertEquals(v2, app.get().deployments().get(ZoneId.from("prod", "eu-west-1")).version()); assertEquals(v1, app.get().deployments().get(ZoneId.from("prod", "us-east-3")).version()); Version v3 = new Version("6.3"); tester.upgradeSystem(v3); tester.deployAndNotify(application, false, productionUsEast3); tester.deployAndNotify(application, true, systemTest); tester.deployAndNotify(application, true, stagingTest); assertEquals(v2, 
app.get().deploymentJobs().jobStatus().get(stagingTest).lastSuccess().get().sourcePlatform().get()); tester.deployAndNotify(application, true, productionUsCentral1); assertEquals(v1, app.get().deploymentJobs().jobStatus().get(stagingTest).lastTriggered().get().sourcePlatform().get()); tester.deployAndNotify(application, true, stagingTest); tester.deployAndNotify(application, true, productionEuWest1); tester.deployAndNotify(application, true, productionUsEast3); } @Test }
Should `jaxRsStrategy.setMaxIterations(2);` be called prior to this, or is it OK to depend on the default value here?
public void testRetryGivesUpAfterTwoLoopsOverAvailableServers() { testRetryGivesUpAfterXIterations(2); }
testRetryGivesUpAfterXIterations(2);
public void testRetryGivesUpAfterTwoLoopsOverAvailableServers() { testRetryGivesUpAfterXIterations(2); }
class RetryingJaxRsStrategyTest { private static final String API_PATH = "/"; @Path(API_PATH) private interface TestJaxRsApi { @GET @Path("/foo/bar") String doSomething(); } private static final Set<HostName> SERVER_HOSTS = new HashSet<>(Arrays.asList( new HostName("host-1"), new HostName("host-2"), new HostName("host-3"))); private static final int REST_PORT = Defaults.getDefaults().vespaWebServicePort(); private final JaxRsClientFactory jaxRsClientFactory = mock(JaxRsClientFactory.class); private final TestJaxRsApi mockApi = mock(TestJaxRsApi.class); private final RetryingJaxRsStrategy<TestJaxRsApi> jaxRsStrategy = new RetryingJaxRsStrategy<>( SERVER_HOSTS, REST_PORT, jaxRsClientFactory, TestJaxRsApi.class, API_PATH, "http"); @Before public void setup() { when(jaxRsClientFactory.createClient(eq(TestJaxRsApi.class), any(HostName.class), anyInt(), anyString(), anyString())) .thenReturn(mockApi); } @Test public void noRetryIfNoFailure() throws Exception { jaxRsStrategy.apply(TestJaxRsApi::doSomething); verify(mockApi, times(1)).doSomething(); final ArgumentCaptor<HostName> hostNameCaptor = ArgumentCaptor.forClass(HostName.class); verify(jaxRsClientFactory, times(1)) .createClient(eq(TestJaxRsApi.class), hostNameCaptor.capture(), eq(REST_PORT), eq(API_PATH), eq("http")); assertThat(SERVER_HOSTS.contains(hostNameCaptor.getValue()), is(true)); } @Test public void testRetryAfterSingleFailure() throws Exception { when(mockApi.doSomething()) .thenThrow(new ProcessingException("Fake timeout induced by test")) .thenReturn("a response"); jaxRsStrategy.apply(TestJaxRsApi::doSomething); verify(mockApi, times(2)).doSomething(); } @Test public void testRetryUsesAllAvailableServers() throws Exception { when(mockApi.doSomething()) .thenThrow(new ProcessingException("Fake timeout 1 induced by test")) .thenThrow(new ProcessingException("Fake timeout 2 induced by test")) .thenReturn("a response"); jaxRsStrategy.apply(TestJaxRsApi::doSomething); verify(mockApi, 
times(3)).doSomething(); verifyAllServersContacted(jaxRsClientFactory); } @Test public void testRetryLoopsOverAvailableServers() throws Exception { when(mockApi.doSomething()) .thenThrow(new ProcessingException("Fake timeout 1 induced by test")) .thenThrow(new ProcessingException("Fake timeout 2 induced by test")) .thenThrow(new ProcessingException("Fake timeout 3 induced by test")) .thenThrow(new ProcessingException("Fake timeout 4 induced by test")) .thenReturn("a response"); jaxRsStrategy.apply(TestJaxRsApi::doSomething); verify(mockApi, times(5)).doSomething(); verifyAllServersContacted(jaxRsClientFactory); } @Test public void testRetryGivesUpAfterOneLoopOverAvailableServers() { jaxRsStrategy.setMaxIterations(1); testRetryGivesUpAfterXIterations(1); } @Test private void testRetryGivesUpAfterXIterations(int iterations) { OngoingStubbing<String> stub = when(mockApi.doSomething()); for (int i = 0; i < iterations; ++i) { stub = stub .thenThrow(new ProcessingException("Fake timeout 1 iteration " + i)) .thenThrow(new ProcessingException("Fake timeout 2 iteration " + i)) .thenThrow(new ProcessingException("Fake timeout 3 iteration " + i)); } try { jaxRsStrategy.apply(TestJaxRsApi::doSomething); fail("Exception should be thrown from above statement"); } catch (IOException e) { } verify(mockApi, times(iterations * 3)).doSomething(); verifyAllServersContacted(jaxRsClientFactory); } private static void verifyAllServersContacted( final JaxRsClientFactory jaxRsClientFactory) { final ArgumentCaptor<HostName> hostNameCaptor = ArgumentCaptor.forClass(HostName.class); verify(jaxRsClientFactory, atLeast(SERVER_HOSTS.size())) .createClient(eq(TestJaxRsApi.class), hostNameCaptor.capture(), eq(REST_PORT), eq(API_PATH), eq("http")); final Set<HostName> actualServerHostsContacted = new HashSet<>(hostNameCaptor.getAllValues()); assertThat(actualServerHostsContacted, equalTo(SERVER_HOSTS)); } }
class RetryingJaxRsStrategyTest { private static final String API_PATH = "/"; @Path(API_PATH) private interface TestJaxRsApi { @GET @Path("/foo/bar") String doSomething(); } private static final Set<HostName> SERVER_HOSTS = new HashSet<>(Arrays.asList( new HostName("host-1"), new HostName("host-2"), new HostName("host-3"))); private static final int REST_PORT = Defaults.getDefaults().vespaWebServicePort(); private final JaxRsClientFactory jaxRsClientFactory = mock(JaxRsClientFactory.class); private final TestJaxRsApi mockApi = mock(TestJaxRsApi.class); private final RetryingJaxRsStrategy<TestJaxRsApi> jaxRsStrategy = new RetryingJaxRsStrategy<>( SERVER_HOSTS, REST_PORT, jaxRsClientFactory, TestJaxRsApi.class, API_PATH, "http"); @Before public void setup() { when(jaxRsClientFactory.createClient(eq(TestJaxRsApi.class), any(HostName.class), anyInt(), anyString(), anyString())) .thenReturn(mockApi); } @Test public void noRetryIfNoFailure() throws Exception { jaxRsStrategy.apply(TestJaxRsApi::doSomething); verify(mockApi, times(1)).doSomething(); final ArgumentCaptor<HostName> hostNameCaptor = ArgumentCaptor.forClass(HostName.class); verify(jaxRsClientFactory, times(1)) .createClient(eq(TestJaxRsApi.class), hostNameCaptor.capture(), eq(REST_PORT), eq(API_PATH), eq("http")); assertThat(SERVER_HOSTS.contains(hostNameCaptor.getValue()), is(true)); } @Test public void testRetryAfterSingleFailure() throws Exception { when(mockApi.doSomething()) .thenThrow(new ProcessingException("Fake timeout induced by test")) .thenReturn("a response"); jaxRsStrategy.apply(TestJaxRsApi::doSomething); verify(mockApi, times(2)).doSomething(); } @Test public void testRetryUsesAllAvailableServers() throws Exception { when(mockApi.doSomething()) .thenThrow(new ProcessingException("Fake timeout 1 induced by test")) .thenThrow(new ProcessingException("Fake timeout 2 induced by test")) .thenReturn("a response"); jaxRsStrategy.apply(TestJaxRsApi::doSomething); verify(mockApi, 
times(3)).doSomething(); verifyAllServersContacted(jaxRsClientFactory); } @Test public void testRetryLoopsOverAvailableServers() throws Exception { when(mockApi.doSomething()) .thenThrow(new ProcessingException("Fake timeout 1 induced by test")) .thenThrow(new ProcessingException("Fake timeout 2 induced by test")) .thenThrow(new ProcessingException("Fake timeout 3 induced by test")) .thenThrow(new ProcessingException("Fake timeout 4 induced by test")) .thenReturn("a response"); jaxRsStrategy.apply(TestJaxRsApi::doSomething); verify(mockApi, times(5)).doSomething(); verifyAllServersContacted(jaxRsClientFactory); } @Test public void testRetryGivesUpAfterOneLoopOverAvailableServers() { jaxRsStrategy.setMaxIterations(1); testRetryGivesUpAfterXIterations(1); } @Test private void testRetryGivesUpAfterXIterations(int iterations) { OngoingStubbing<String> stub = when(mockApi.doSomething()); for (int i = 0; i < iterations; ++i) { stub = stub .thenThrow(new ProcessingException("Fake timeout 1 iteration " + i)) .thenThrow(new ProcessingException("Fake timeout 2 iteration " + i)) .thenThrow(new ProcessingException("Fake timeout 3 iteration " + i)); } try { jaxRsStrategy.apply(TestJaxRsApi::doSomething); fail("Exception should be thrown from above statement"); } catch (IOException e) { } verify(mockApi, times(iterations * 3)).doSomething(); verifyAllServersContacted(jaxRsClientFactory); } private static void verifyAllServersContacted( final JaxRsClientFactory jaxRsClientFactory) { final ArgumentCaptor<HostName> hostNameCaptor = ArgumentCaptor.forClass(HostName.class); verify(jaxRsClientFactory, atLeast(SERVER_HOSTS.size())) .createClient(eq(TestJaxRsApi.class), hostNameCaptor.capture(), eq(REST_PORT), eq(API_PATH), eq("http")); final Set<HostName> actualServerHostsContacted = new HashSet<>(hostNameCaptor.getAllValues()); assertThat(actualServerHostsContacted, equalTo(SERVER_HOSTS)); } }
I wanted to test the default, which should still be 2.
public void testRetryGivesUpAfterTwoLoopsOverAvailableServers() { testRetryGivesUpAfterXIterations(2); }
testRetryGivesUpAfterXIterations(2);
public void testRetryGivesUpAfterTwoLoopsOverAvailableServers() { testRetryGivesUpAfterXIterations(2); }
// Unit test of RetryingJaxRsStrategy: verifies that failed calls are retried
// against all configured server hosts, and that retrying gives up after the
// configured number of iterations over the host set.
class RetryingJaxRsStrategyTest {

    private static final String API_PATH = "/";

    @Path(API_PATH)
    private interface TestJaxRsApi {
        @GET
        @Path("/foo/bar")
        String doSomething();
    }

    private static final Set<HostName> SERVER_HOSTS = new HashSet<>(Arrays.asList(
            new HostName("host-1"),
            new HostName("host-2"),
            new HostName("host-3")));
    private static final int REST_PORT = Defaults.getDefaults().vespaWebServicePort();

    private final JaxRsClientFactory jaxRsClientFactory = mock(JaxRsClientFactory.class);
    private final TestJaxRsApi mockApi = mock(TestJaxRsApi.class);
    private final RetryingJaxRsStrategy<TestJaxRsApi> jaxRsStrategy = new RetryingJaxRsStrategy<>(
            SERVER_HOSTS, REST_PORT, jaxRsClientFactory, TestJaxRsApi.class, API_PATH, "http");

    @Before
    public void setup() {
        when(jaxRsClientFactory.createClient(
                eq(TestJaxRsApi.class), any(HostName.class), anyInt(), anyString(), anyString()))
                .thenReturn(mockApi);
    }

    @Test
    public void noRetryIfNoFailure() throws Exception {
        jaxRsStrategy.apply(TestJaxRsApi::doSomething);

        verify(mockApi, times(1)).doSomething();

        final ArgumentCaptor<HostName> hostNameCaptor = ArgumentCaptor.forClass(HostName.class);
        verify(jaxRsClientFactory, times(1))
                .createClient(eq(TestJaxRsApi.class), hostNameCaptor.capture(), eq(REST_PORT), eq(API_PATH), eq("http"));
        assertThat(SERVER_HOSTS.contains(hostNameCaptor.getValue()), is(true));
    }

    @Test
    public void testRetryAfterSingleFailure() throws Exception {
        when(mockApi.doSomething())
                .thenThrow(new ProcessingException("Fake timeout induced by test"))
                .thenReturn("a response");

        jaxRsStrategy.apply(TestJaxRsApi::doSomething);

        verify(mockApi, times(2)).doSomething();
    }

    @Test
    public void testRetryUsesAllAvailableServers() throws Exception {
        when(mockApi.doSomething())
                .thenThrow(new ProcessingException("Fake timeout 1 induced by test"))
                .thenThrow(new ProcessingException("Fake timeout 2 induced by test"))
                .thenReturn("a response");

        jaxRsStrategy.apply(TestJaxRsApi::doSomething);

        verify(mockApi, times(3)).doSomething();
        verifyAllServersContacted(jaxRsClientFactory);
    }

    @Test
    public void testRetryLoopsOverAvailableServers() throws Exception {
        when(mockApi.doSomething())
                .thenThrow(new ProcessingException("Fake timeout 1 induced by test"))
                .thenThrow(new ProcessingException("Fake timeout 2 induced by test"))
                .thenThrow(new ProcessingException("Fake timeout 3 induced by test"))
                .thenThrow(new ProcessingException("Fake timeout 4 induced by test"))
                .thenReturn("a response");

        jaxRsStrategy.apply(TestJaxRsApi::doSomething);

        verify(mockApi, times(5)).doSomething();
        verifyAllServersContacted(jaxRsClientFactory);
    }

    // Exercises the default max iterations (no setMaxIterations call), which
    // should still be 2 loops over the available servers.
    @Test
    public void testRetryGivesUpAfterTwoLoopsOverAvailableServers() {
        testRetryGivesUpAfterXIterations(2);
    }

    @Test
    public void testRetryGivesUpAfterOneLoopOverAvailableServers() {
        jaxRsStrategy.setMaxIterations(1);
        testRetryGivesUpAfterXIterations(1);
    }

    // Helper, not a test in itself: JUnit 4 requires @Test methods to be public
    // and parameterless, so this must NOT carry the @Test annotation (the
    // original had one, which makes the whole class fail validation).
    private void testRetryGivesUpAfterXIterations(int iterations) {
        OngoingStubbing<String> stub = when(mockApi.doSomething());
        // Stub one failure per server host (3) per iteration, with no success at the end.
        for (int i = 0; i < iterations; ++i) {
            stub = stub
                    .thenThrow(new ProcessingException("Fake timeout 1 iteration " + i))
                    .thenThrow(new ProcessingException("Fake timeout 2 iteration " + i))
                    .thenThrow(new ProcessingException("Fake timeout 3 iteration " + i));
        }

        try {
            jaxRsStrategy.apply(TestJaxRsApi::doSomething);
            fail("Exception should be thrown from above statement");
        } catch (IOException e) {
            // Expected: the strategy gives up after exhausting all retries.
        }

        verify(mockApi, times(iterations * 3)).doSomething();
        verifyAllServersContacted(jaxRsClientFactory);
    }

    private static void verifyAllServersContacted(final JaxRsClientFactory jaxRsClientFactory) {
        final ArgumentCaptor<HostName> hostNameCaptor = ArgumentCaptor.forClass(HostName.class);
        verify(jaxRsClientFactory, atLeast(SERVER_HOSTS.size()))
                .createClient(eq(TestJaxRsApi.class), hostNameCaptor.capture(), eq(REST_PORT), eq(API_PATH), eq("http"));

        final Set<HostName> actualServerHostsContacted = new HashSet<>(hostNameCaptor.getAllValues());
        assertThat(actualServerHostsContacted, equalTo(SERVER_HOSTS));
    }
}
// Unit test of RetryingJaxRsStrategy: verifies that failed calls are retried
// against all configured server hosts, and that retrying gives up after the
// configured number of iterations over the host set.
class RetryingJaxRsStrategyTest {

    private static final String API_PATH = "/";

    @Path(API_PATH)
    private interface TestJaxRsApi {
        @GET
        @Path("/foo/bar")
        String doSomething();
    }

    private static final Set<HostName> SERVER_HOSTS = new HashSet<>(Arrays.asList(
            new HostName("host-1"),
            new HostName("host-2"),
            new HostName("host-3")));
    private static final int REST_PORT = Defaults.getDefaults().vespaWebServicePort();

    private final JaxRsClientFactory jaxRsClientFactory = mock(JaxRsClientFactory.class);
    private final TestJaxRsApi mockApi = mock(TestJaxRsApi.class);
    private final RetryingJaxRsStrategy<TestJaxRsApi> jaxRsStrategy = new RetryingJaxRsStrategy<>(
            SERVER_HOSTS, REST_PORT, jaxRsClientFactory, TestJaxRsApi.class, API_PATH, "http");

    @Before
    public void setup() {
        when(jaxRsClientFactory.createClient(
                eq(TestJaxRsApi.class), any(HostName.class), anyInt(), anyString(), anyString()))
                .thenReturn(mockApi);
    }

    @Test
    public void noRetryIfNoFailure() throws Exception {
        jaxRsStrategy.apply(TestJaxRsApi::doSomething);

        verify(mockApi, times(1)).doSomething();

        final ArgumentCaptor<HostName> hostNameCaptor = ArgumentCaptor.forClass(HostName.class);
        verify(jaxRsClientFactory, times(1))
                .createClient(eq(TestJaxRsApi.class), hostNameCaptor.capture(), eq(REST_PORT), eq(API_PATH), eq("http"));
        assertThat(SERVER_HOSTS.contains(hostNameCaptor.getValue()), is(true));
    }

    @Test
    public void testRetryAfterSingleFailure() throws Exception {
        when(mockApi.doSomething())
                .thenThrow(new ProcessingException("Fake timeout induced by test"))
                .thenReturn("a response");

        jaxRsStrategy.apply(TestJaxRsApi::doSomething);

        verify(mockApi, times(2)).doSomething();
    }

    @Test
    public void testRetryUsesAllAvailableServers() throws Exception {
        when(mockApi.doSomething())
                .thenThrow(new ProcessingException("Fake timeout 1 induced by test"))
                .thenThrow(new ProcessingException("Fake timeout 2 induced by test"))
                .thenReturn("a response");

        jaxRsStrategy.apply(TestJaxRsApi::doSomething);

        verify(mockApi, times(3)).doSomething();
        verifyAllServersContacted(jaxRsClientFactory);
    }

    @Test
    public void testRetryLoopsOverAvailableServers() throws Exception {
        when(mockApi.doSomething())
                .thenThrow(new ProcessingException("Fake timeout 1 induced by test"))
                .thenThrow(new ProcessingException("Fake timeout 2 induced by test"))
                .thenThrow(new ProcessingException("Fake timeout 3 induced by test"))
                .thenThrow(new ProcessingException("Fake timeout 4 induced by test"))
                .thenReturn("a response");

        jaxRsStrategy.apply(TestJaxRsApi::doSomething);

        verify(mockApi, times(5)).doSomething();
        verifyAllServersContacted(jaxRsClientFactory);
    }

    // Exercises the default max iterations (no setMaxIterations call), which
    // should still be 2 loops over the available servers.
    @Test
    public void testRetryGivesUpAfterTwoLoopsOverAvailableServers() {
        testRetryGivesUpAfterXIterations(2);
    }

    @Test
    public void testRetryGivesUpAfterOneLoopOverAvailableServers() {
        jaxRsStrategy.setMaxIterations(1);
        testRetryGivesUpAfterXIterations(1);
    }

    // Helper, not a test in itself: JUnit 4 requires @Test methods to be public
    // and parameterless, so this must NOT carry the @Test annotation (the
    // original had one, which makes the whole class fail validation).
    private void testRetryGivesUpAfterXIterations(int iterations) {
        OngoingStubbing<String> stub = when(mockApi.doSomething());
        // Stub one failure per server host (3) per iteration, with no success at the end.
        for (int i = 0; i < iterations; ++i) {
            stub = stub
                    .thenThrow(new ProcessingException("Fake timeout 1 iteration " + i))
                    .thenThrow(new ProcessingException("Fake timeout 2 iteration " + i))
                    .thenThrow(new ProcessingException("Fake timeout 3 iteration " + i));
        }

        try {
            jaxRsStrategy.apply(TestJaxRsApi::doSomething);
            fail("Exception should be thrown from above statement");
        } catch (IOException e) {
            // Expected: the strategy gives up after exhausting all retries.
        }

        verify(mockApi, times(iterations * 3)).doSomething();
        verifyAllServersContacted(jaxRsClientFactory);
    }

    private static void verifyAllServersContacted(final JaxRsClientFactory jaxRsClientFactory) {
        final ArgumentCaptor<HostName> hostNameCaptor = ArgumentCaptor.forClass(HostName.class);
        verify(jaxRsClientFactory, atLeast(SERVER_HOSTS.size()))
                .createClient(eq(TestJaxRsApi.class), hostNameCaptor.capture(), eq(REST_PORT), eq(API_PATH), eq("http"));

        final Set<HostName> actualServerHostsContacted = new HashSet<>(hostNameCaptor.getAllValues());
        assertThat(actualServerHostsContacted, equalTo(SERVER_HOSTS));
    }
}
Exception messages are logged where they are caught, so you either: 1. throw an exception here, and take care to include the cause, e.g., `throw new RuntimeException("Some message", e);`, or 2. catch the exception and log it, using the `Logger log`. Which of the two depends on where you can deal with the exception. In this case, there isn't much you can do, and the result should be a 400 or a 500, depending on whether the input was wrong (jsonToSlime threw) or the internal request object encountered an exception (readBytes threw). I guess it was a good idea to throw a separate exception for the JSON failure, so perhaps you should bring that back :)
/**
 * Handles a search request: builds the Query (from a JSON body for POSTs with
 * the JSON content type, otherwise from URL parameters), validates it, resolves
 * the search chain, executes the search, and renders the HTTP response.
 *
 * Fixes over the original: no printStackTrace() in server code, and the
 * RuntimeException thrown on a body-read failure now carries the cause.
 */
private HttpSearchResponse handleBody(HttpRequest request) {
    String queryProfileName = request.getProperty("queryProfile");
    CompiledQueryProfile queryProfile = queryProfileRegistry.findQueryProfile(queryProfileName);
    boolean benchmarkOutput = VespaHeaders.benchmarkOutput(request);
    Query query = queryFromRequest(request, queryProfile);
    boolean benchmarkCoverage = VespaHeaders.benchmarkCoverage(benchmarkOutput, request.getJDiscRequest().headers());

    String invalidReason = query.validate();
    Chain<Searcher> searchChain = null;
    String searchChainName = null;
    if (invalidReason == null) {
        Tuple2<String, Chain<Searcher>> nameAndChain =
                resolveChain(query.properties().getString(Query.SEARCH_CHAIN));
        searchChainName = nameAndChain.first;
        searchChain = nameAndChain.second;
    }

    // Turn validation/lookup failures into error results; otherwise run the search.
    Result result;
    if (invalidReason != null) {
        result = new Result(query, ErrorMessage.createIllegalQuery(invalidReason));
    } else if (queryProfile == null && queryProfileName != null) {
        result = new Result(
                query,
                ErrorMessage.createIllegalQuery("Could not resolve query profile '" + queryProfileName + "'"));
    } else if (searchChain == null) {
        result = new Result(
                query,
                ErrorMessage.createInvalidQueryParameter("No search chain named '" + searchChainName + "' was found"));
    } else {
        String pathAndQuery = UriTools.rawRequest(request.getUri());
        result = search(pathAndQuery, query, searchChain, searchChainRegistry);
    }

    // Pick a renderer: the query's default, or a per-rendering copy of the one already set.
    Renderer renderer;
    if (result.getTemplating().usesDefaultTemplate()) {
        renderer = toRendererCopy(query.getPresentation().getRenderer());
        result.getTemplating().setRenderer(renderer);
    } else {
        renderer = perRenderingCopy(result.getTemplating().getRenderer());
    }

    HttpSearchResponse response =
            new HttpSearchResponse(getHttpResponseStatus(request, result), result, query, renderer);
    if (hostResponseHeaderKey.isPresent())
        response.headers().add(hostResponseHeaderKey.get(), selfHostname);
    if (benchmarkOutput)
        VespaHeaders.benchmarkOutput(response.headers(), benchmarkCoverage, response.getTiming(),
                                     response.getHitCounts(), getErrors(result), response.getCoverage());
    return response;
}

/**
 * Builds the Query for a request. A POST with the JSON content type has its
 * body parsed as JSON and flattened into a request-property map; any other
 * request is built from its URL parameters.
 *
 * @throws RuntimeException (with the underlying IOException as cause) if the
 *         body cannot be read or parsed as JSON
 */
private Query queryFromRequest(HttpRequest request, CompiledQueryProfile queryProfile) {
    if (request.getMethod() == com.yahoo.jdisc.http.HttpRequest.Method.POST
            && request.getHeader(com.yahoo.jdisc.http.HttpHeaders.Names.CONTENT_TYPE).equals(JSON_CONTENT_TYPE)) {
        Inspector inspector;
        try {
            byte[] byteArray = IOUtils.readBytes(request.getData(), 1 << 20); // cap the body at 1 MiB
            inspector = SlimeUtils.jsonToSlime(byteArray).get();
        } catch (IOException e) {
            // Preserve the cause so the failure is diagnosable where it is logged.
            throw new RuntimeException("Could not resolve JSON-query", e);
        }
        Map<String, String> requestMap = new HashMap<>();
        createRequestMapping(inspector, requestMap, "");
        return new Query(request, requestMap, queryProfile);
    }
    return new Query(request, queryProfile);
}
e.printStackTrace();
// Handles a search request: builds the Query via queryFromRequest (defined
// elsewhere in this class), validates it, resolves the search chain, executes
// the search, and renders the HTTP response.
private HttpSearchResponse handleBody(HttpRequest request){
    String queryProfileName = request.getProperty("queryProfile");
    CompiledQueryProfile queryProfile = queryProfileRegistry.findQueryProfile(queryProfileName);
    boolean benchmarkOutput = VespaHeaders.benchmarkOutput(request);
    // Builds from a JSON POST body or URL parameters, per queryFromRequest.
    Query query = queryFromRequest(request, queryProfile);
    boolean benchmarkCoverage = VespaHeaders.benchmarkCoverage(benchmarkOutput, request.getJDiscRequest().headers());
    String invalidReason = query.validate();
    Chain<Searcher> searchChain = null;
    String searchChainName = null;
    if (invalidReason == null) {
        Tuple2<String, Chain<Searcher>> nameAndChain = resolveChain(query.properties().getString(Query.SEARCH_CHAIN));
        searchChainName = nameAndChain.first;
        searchChain = nameAndChain.second;
    }
    // Validation/lookup failures become error results; otherwise the search runs.
    Result result;
    if (invalidReason != null) {
        result = new Result(query, ErrorMessage.createIllegalQuery(invalidReason));
    } else if (queryProfile == null && queryProfileName != null) {
        result = new Result(
                query, ErrorMessage.createIllegalQuery("Could not resolve query profile '" + queryProfileName + "'"));
    } else if (searchChain == null) {
        result = new Result(
                query, ErrorMessage.createInvalidQueryParameter("No search chain named '" + searchChainName + "' was found"));
    } else {
        String pathAndQuery = UriTools.rawRequest(request.getUri());
        result = search(pathAndQuery, query, searchChain, searchChainRegistry);
    }
    // Renderer selection: the query's default, or a per-rendering copy of the one set on the result.
    Renderer renderer;
    if (result.getTemplating().usesDefaultTemplate()) {
        renderer = toRendererCopy(query.getPresentation().getRenderer());
        result.getTemplating().setRenderer(renderer);
    } else {
        renderer = perRenderingCopy(result.getTemplating().getRenderer());
    }
    HttpSearchResponse response = new HttpSearchResponse(getHttpResponseStatus(request, result), result, query, renderer);
    if (hostResponseHeaderKey.isPresent())
        response.headers().add(hostResponseHeaderKey.get(), selfHostname);
    if (benchmarkOutput)
        VespaHeaders.benchmarkOutput(response.headers(), benchmarkCoverage, response.getTiming(),
                response.getHitCounts(), getErrors(result), response.getCoverage());
    return response;
}
class MeanConnections implements Callback { @Override public void run(Handle h, boolean firstTime) { if (firstTime) { metric.set(SEARCH_CONNECTIONS, 0.0d, null); return; } Value v = (Value) h; metric.set(SEARCH_CONNECTIONS, v.getMean(), null); } }
class MeanConnections implements Callback { @Override public void run(Handle h, boolean firstTime) { if (firstTime) { metric.set(SEARCH_CONNECTIONS, 0.0d, null); return; } Value v = (Value) h; metric.set(SEARCH_CONNECTIONS, v.getMean(), null); } }