comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Use "try-with-resources" to ensure all streams are closed if an exception is thrown during read/write.
/**
 * Compresses the given file to "&lt;name&gt;.gz", preserving its modification time,
 * then deletes the original.  Both streams are opened with try-with-resources
 * so they are closed even if an exception is thrown during read/write.
 *
 * @param oldFile the file to compress and remove
 */
private void compress(File oldFile) {
    File gzippedFile = new File(oldFile.getPath() + ".gz");
    try {
        long mtime = oldFile.lastModified();
        try (FileInputStream inputStream = new FileInputStream(oldFile);
             GZIPOutputStream compressor =
                     new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000)) {
            byte[] buffer = new byte[0x100000];
            for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) {
                compressor.write(buffer, 0, read);
            }
            compressor.finish(); // close() also flushes; explicit finish kept for clarity
        }
        oldFile.delete();
        gzippedFile.setLastModified(mtime);
        log.info("Compressed: " + gzippedFile);
    } catch (IOException e) {
        log.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'.");
    }
}
GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000);
/**
 * Compresses the given file to "&lt;name&gt;.gz", preserving its modification time,
 * then deletes the original.  Both streams are opened with try-with-resources
 * so they are closed even if an exception is thrown during read/write.
 *
 * @param oldFile the file to compress and remove
 */
private void compress(File oldFile) {
    File gzippedFile = new File(oldFile.getPath() + ".gz");
    try {
        long mtime = oldFile.lastModified();
        try (FileInputStream inputStream = new FileInputStream(oldFile);
             GZIPOutputStream compressor =
                     new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000)) {
            byte[] buffer = new byte[0x100000];
            for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) {
                compressor.write(buffer, 0, read);
            }
            compressor.finish(); // close() also flushes; explicit finish kept for clarity
        }
        oldFile.delete();
        gzippedFile.setLastModified(mtime);
        log.info("Compressed: " + gzippedFile);
    } catch (IOException e) {
        log.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'.");
    }
}
/**
 * Manages an archive directory of log files: files older than the retention
 * period are deleted, sufficiently old files are gzip-compressed, and when
 * total disk usage exceeds the size limit, progressively newer files are
 * removed until usage is acceptable.
 */
class FilesArchived {

    private static final Logger log = Logger.getLogger(FilesArchived.class.getName());

    /** File instance representing root directory of archive */
    private final File root;

    /** Snapshot of the files currently under {@link #root}; refreshed by rescan(). */
    private List<LogFile> knownFiles;

    /** Age (in milliseconds) after which a file becomes eligible for compression. */
    public final static long compressAfterMillis = 2L * 3600 * 1000;

    /** Files older than this many days are deleted. */
    private long maxAgeDays = 30;

    /** Total archive size limit: 30 GiB. */
    private long sizeLimit = 30L * (1L << 30);

    /**
     * Creates a FilesArchived managing the given directory.
     * Maintenance runs immediately on construction.
     */
    public FilesArchived(File rootDir) {
        this.root = rootDir;
        maintenance();
    }

    public String toString() {
        return FilesArchived.class.getName() + ": root=" + root;
    }

    /** Returns the highest generation number among known files with the given prefix. */
    public int highestGen(String prefix) {
        int gen = 0;
        for (LogFile lf : knownFiles) {
            if (prefix.equals(lf.prefix)) {
                gen = Math.max(gen, lf.generation);
            }
        }
        return gen;
    }

    /** Performs one maintenance round: delete expired files, compress old ones, trim for size. */
    public synchronized void maintenance() {
        rescan();
        if (removeOlderThan(maxAgeDays)) rescan();
        if (compressOldFiles()) rescan();
        // If still over the size limit, delete progressively newer files
        // (down to, but not including, one-day-old files).
        long days = maxAgeDays;
        while (tooMuchDiskUsage() && (--days > 1)) {
            if (removeOlderThan(days)) rescan();
        }
    }

    private void rescan() {
        knownFiles = scanDir(root);
    }

    boolean tooMuchDiskUsage() {
        return sumFileSizes() > sizeLimit;
    }

    /** Returns whether the file's mtime is more than the given number of days before now. */
    private boolean olderThan(LogFile lf, long days, long now) {
        long mtime = lf.path.lastModified();
        long diff = now - mtime;
        return (diff > days * 86400L * 1000L);
    }

    /** Deletes known files older than the given number of days; returns whether any were deleted. */
    private boolean removeOlderThan(long days) {
        boolean action = false;
        long now = System.currentTimeMillis();
        for (LogFile lf : knownFiles) {
            if (olderThan(lf, days, now)) {
                lf.path.delete();
                log.info("Deleted: " + lf.path);
                action = true;
            }
        }
        return action;
    }

    /**
     * Compresses eligible files, at most 5 per call to bound the work done in
     * one maintenance round; returns whether any file was eligible.
     */
    private boolean compressOldFiles() {
        long now = System.currentTimeMillis();
        int count = 0;
        for (LogFile lf : knownFiles) {
            if (lf.canCompress(now) && (count++ < 5)) {
                compress(lf.path);
            }
        }
        return count > 0;
    }

    /**
     * Compresses the given file to "&lt;name&gt;.gz", preserving its modification
     * time, then deletes the original.  Streams are opened with
     * try-with-resources so they are closed even if read/write throws.
     */
    private void compress(File oldFile) {
        File gzippedFile = new File(oldFile.getPath() + ".gz");
        try {
            long mtime = oldFile.lastModified();
            try (FileInputStream inputStream = new FileInputStream(oldFile);
                 GZIPOutputStream compressor =
                         new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000)) {
                byte[] buffer = new byte[0x100000];
                for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) {
                    compressor.write(buffer, 0, read);
                }
                compressor.finish();
            }
            oldFile.delete();
            gzippedFile.setLastModified(mtime);
            log.info("Compressed: " + gzippedFile);
        } catch (IOException e) {
            log.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'.");
        }
    }

    /** Returns the total size in bytes of all known files. */
    public long sumFileSizes() {
        long sum = 0;
        for (LogFile lf : knownFiles) {
            sum += lf.path.length();
        }
        return sum;
    }

    /** Recursively lists all regular files under the given directory. */
    private static List<LogFile> scanDir(File top) {
        List<LogFile> retval = new ArrayList<>();
        String[] names = top.list();
        if (names != null) {
            for (String name : names) {
                File sub = new File(top, name);
                if (sub.isFile()) {
                    retval.add(new LogFile(sub));
                } else if (sub.isDirectory()) {
                    retval.addAll(scanDir(sub));
                }
            }
        }
        return retval;
    }

    /** A file in the archive, with its parsed prefix, generation number and compression state. */
    static class LogFile {
        public final File path;
        public final String prefix;     // full path up to the last '-'
        public final int generation;    // leading digits after the last '-', 0 if none
        public final boolean zsuff;     // whether the name ends in ".gz"

        /**
         * Returns whether this file may be compressed now: not already
         * compressed, still a regular file, and older than compressAfterMillis.
         */
        public boolean canCompress(long now) {
            if (zsuff) return false;            // already compressed
            if (! path.isFile()) return false;  // deleted or not a regular file
            long diff = now - path.lastModified();
            if (diff < compressAfterMillis) return false; // too fresh
            return true;
        }

        /** Parses the leading digits after the last '-' in the name; 0 if there is no dash. */
        private static int generationOf(String name) {
            int dash = name.lastIndexOf('-');
            if (dash < 0) return 0;
            String suff = name.substring(dash + 1);
            int r = 0;
            for (char ch : suff.toCharArray()) {
                if (ch >= '0' && ch <= '9') {
                    r *= 10;
                    r += (ch - '0');
                } else {
                    break;
                }
            }
            return r;
        }

        /** Returns the part of the name before the last '-', or the whole name if there is none. */
        private static String prefixOf(String name) {
            int dash = name.lastIndexOf('-');
            if (dash < 0) return name;
            return name.substring(0, dash);
        }

        private static boolean zSuffix(String name) {
            return name.endsWith(".gz");
        }

        public LogFile(File path) {
            String name = path.toString();
            this.path = path;
            this.prefix = prefixOf(name);
            this.generation = generationOf(name);
            this.zsuff = zSuffix(name);
        }

        public String toString() {
            return "FilesArchived.LogFile{name="+path+" prefix="+prefix+" gen="+generation+" z="+zsuff+"}";
        }
    }
}
/**
 * Manages an archive directory of log files: files older than the retention
 * period are deleted, sufficiently old files are gzip-compressed, and when
 * total disk usage exceeds the size limit, progressively newer files are
 * removed until usage is acceptable.
 */
class FilesArchived {

    private static final Logger log = Logger.getLogger(FilesArchived.class.getName());

    /** File instance representing root directory of archive */
    private final File root;

    /** Snapshot of the files currently under {@link #root}; refreshed by rescan(). */
    private List<LogFile> knownFiles;

    /** Age (in milliseconds) after which a file becomes eligible for compression. */
    public final static long compressAfterMillis = 2L * 3600 * 1000;

    /** Files older than this many days are deleted. */
    private long maxAgeDays = 30;

    /** Total archive size limit: 30 GiB. */
    private long sizeLimit = 30L * (1L << 30);

    /**
     * Creates a FilesArchived managing the given directory.
     * Maintenance runs immediately on construction.
     */
    public FilesArchived(File rootDir) {
        this.root = rootDir;
        maintenance();
    }

    public String toString() {
        return FilesArchived.class.getName() + ": root=" + root;
    }

    /** Returns the highest generation number among known files with the given prefix. */
    public int highestGen(String prefix) {
        int gen = 0;
        for (LogFile lf : knownFiles) {
            if (prefix.equals(lf.prefix)) {
                gen = Math.max(gen, lf.generation);
            }
        }
        return gen;
    }

    /** Performs one maintenance round: delete expired files, compress old ones, trim for size. */
    public synchronized void maintenance() {
        rescan();
        if (removeOlderThan(maxAgeDays)) rescan();
        if (compressOldFiles()) rescan();
        // If still over the size limit, delete progressively newer files
        // (down to, but not including, one-day-old files).
        long days = maxAgeDays;
        while (tooMuchDiskUsage() && (--days > 1)) {
            if (removeOlderThan(days)) rescan();
        }
    }

    private void rescan() {
        knownFiles = scanDir(root);
    }

    boolean tooMuchDiskUsage() {
        return sumFileSizes() > sizeLimit;
    }

    /** Returns whether the file's mtime is more than the given number of days before now. */
    private boolean olderThan(LogFile lf, long days, long now) {
        long mtime = lf.path.lastModified();
        long diff = now - mtime;
        return (diff > days * 86400L * 1000L);
    }

    /** Deletes known files older than the given number of days; returns whether any were deleted. */
    private boolean removeOlderThan(long days) {
        boolean action = false;
        long now = System.currentTimeMillis();
        for (LogFile lf : knownFiles) {
            if (olderThan(lf, days, now)) {
                lf.path.delete();
                log.info("Deleted: " + lf.path);
                action = true;
            }
        }
        return action;
    }

    /**
     * Compresses eligible files, at most 5 per call to bound the work done in
     * one maintenance round; returns whether any file was eligible.
     */
    private boolean compressOldFiles() {
        long now = System.currentTimeMillis();
        int count = 0;
        for (LogFile lf : knownFiles) {
            if (lf.canCompress(now) && (count++ < 5)) {
                compress(lf.path);
            }
        }
        return count > 0;
    }

    /**
     * Compresses the given file to "&lt;name&gt;.gz", preserving its modification
     * time, then deletes the original.  Streams are opened with
     * try-with-resources so they are closed even if read/write throws.
     */
    private void compress(File oldFile) {
        File gzippedFile = new File(oldFile.getPath() + ".gz");
        try {
            long mtime = oldFile.lastModified();
            try (FileInputStream inputStream = new FileInputStream(oldFile);
                 GZIPOutputStream compressor =
                         new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000)) {
                byte[] buffer = new byte[0x100000];
                for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) {
                    compressor.write(buffer, 0, read);
                }
                compressor.finish();
            }
            oldFile.delete();
            gzippedFile.setLastModified(mtime);
            log.info("Compressed: " + gzippedFile);
        } catch (IOException e) {
            log.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'.");
        }
    }

    /** Returns the total size in bytes of all known files. */
    public long sumFileSizes() {
        long sum = 0;
        for (LogFile lf : knownFiles) {
            sum += lf.path.length();
        }
        return sum;
    }

    /** Recursively lists all regular files under the given directory. */
    private static List<LogFile> scanDir(File top) {
        List<LogFile> retval = new ArrayList<>();
        String[] names = top.list();
        if (names != null) {
            for (String name : names) {
                File sub = new File(top, name);
                if (sub.isFile()) {
                    retval.add(new LogFile(sub));
                } else if (sub.isDirectory()) {
                    retval.addAll(scanDir(sub));
                }
            }
        }
        return retval;
    }

    /** A file in the archive, with its parsed prefix, generation number and compression state. */
    static class LogFile {
        public final File path;
        public final String prefix;     // full path up to the last '-'
        public final int generation;    // leading digits after the last '-', 0 if none
        public final boolean zsuff;     // whether the name ends in ".gz"

        /**
         * Returns whether this file may be compressed now: not already
         * compressed, still a regular file, and older than compressAfterMillis.
         */
        public boolean canCompress(long now) {
            if (zsuff) return false;            // already compressed
            if (! path.isFile()) return false;  // deleted or not a regular file
            long diff = now - path.lastModified();
            if (diff < compressAfterMillis) return false; // too fresh
            return true;
        }

        /** Parses the leading digits after the last '-' in the name; 0 if there is no dash. */
        private static int generationOf(String name) {
            int dash = name.lastIndexOf('-');
            if (dash < 0) return 0;
            String suff = name.substring(dash + 1);
            int r = 0;
            for (char ch : suff.toCharArray()) {
                if (ch >= '0' && ch <= '9') {
                    r *= 10;
                    r += (ch - '0');
                } else {
                    break;
                }
            }
            return r;
        }

        /** Returns the part of the name before the last '-', or the whole name if there is none. */
        private static String prefixOf(String name) {
            int dash = name.lastIndexOf('-');
            if (dash < 0) return name;
            return name.substring(0, dash);
        }

        private static boolean zSuffix(String name) {
            return name.endsWith(".gz");
        }

        public LogFile(File path) {
            String name = path.toString();
            this.path = path;
            this.prefix = prefixOf(name);
            this.generation = generationOf(name);
            this.zsuff = zSuffix(name);
        }

        public String toString() {
            return "FilesArchived.LogFile{name="+path+" prefix="+prefix+" gen="+generation+" z="+zsuff+"}";
        }
    }
}
Btw, this is not necessary as `TemporaryFolder` will delete the directory once the test method finishes (even if the test fails).
/**
 * Verifies FilesArchived maintenance over two rounds: files older than the
 * retention period are deleted, sufficiently old files are compressed
 * (gaining a ".gz" suffix), and fresh files are left untouched.
 *
 * No manual cleanup is needed: the TemporaryFolder rule deletes the folder
 * when the test method finishes, even if the test fails, so the previous
 * try/finally with recursive deletion has been removed.
 */
public void testMaintenance() throws java.io.IOException {
    File tmpDir = temporaryFolder.newFolder();
    // Ages are given in hours; 35 and 34 days exceed the 30-day retention.
    makeLogfile(tmpDir, "2018/11/20/13-0", 35 * 24);
    makeLogfile(tmpDir, "2018/11/21/13-0", 34 * 24);
    makeLogfile(tmpDir, "2018/12/28/13-0", 3 * 24);
    makeLogfile(tmpDir, "2018/12/29/13-0", 2 * 24);
    makeLogfile(tmpDir, "2018/12/30/13-0", 1 * 24);
    makeLogfile(tmpDir, "2018/12/31/14-0", 3);
    makeLogfile(tmpDir, "2018/12/31/16-0", 1);
    makeLogfile(tmpDir, "2018/12/31/17-0", 0);
    dumpFiles(tmpDir, "before archive maintenance");
    // Constructing FilesArchived runs maintenance immediately.
    FilesArchived a = new FilesArchived(tmpDir);
    dumpFiles(tmpDir, "after archive maintenance");
    // Fresh files survive uncompressed.
    checkExist(tmpDir, "2018/12/31/17-0");
    checkExist(tmpDir, "2018/12/31/16-0");
    // Older files are compressed in place.
    checkExist(tmpDir, "2018/12/31/14-0.gz");
    checkExist(tmpDir, "2018/12/28/13-0.gz");
    checkExist(tmpDir, "2018/12/29/13-0.gz");
    checkExist(tmpDir, "2018/12/30/13-0.gz");
    checkNoExist(tmpDir, "2018/12/31/17-0.gz");
    checkNoExist(tmpDir, "2018/12/31/16-0.gz");
    checkNoExist(tmpDir, "2018/12/31/14-0");
    checkNoExist(tmpDir, "2018/12/28/13-0");
    checkNoExist(tmpDir, "2018/12/29/13-0");
    checkNoExist(tmpDir, "2018/12/30/13-0");
    // Files beyond the retention period are gone entirely.
    checkNoExist(tmpDir, "2018/11/20/13-0");
    checkNoExist(tmpDir, "2018/11/20/13-0.gz");
    checkNoExist(tmpDir, "2018/11/21/13-0");
    checkNoExist(tmpDir, "2018/11/21/13-0.gz");
    // Second round: age some files and add new generations.
    makeLogfile(tmpDir, "2018/12/31/16-0", 3);
    makeLogfile(tmpDir, "2018/12/31/17-0", 3);
    makeLogfile(tmpDir, "2018/12/31/17-1", 1);
    makeLogfile(tmpDir, "2018/12/31/17-2", 0);
    dumpFiles(tmpDir, "before second archive maintenance");
    a.maintenance();
    dumpFiles(tmpDir, "after second archive maintenance");
    checkExist(tmpDir, "2018/12/31/17-2");
    checkExist(tmpDir, "2018/12/31/17-1");
    checkExist(tmpDir, "2018/12/31/16-0.gz");
    checkExist(tmpDir, "2018/12/31/17-0.gz");
    checkNoExist(tmpDir, "2018/12/31/16-0");
    checkNoExist(tmpDir, "2018/12/31/17-0");
}
IOUtils.recursiveDeleteDir(tmpDir);
/**
 * Verifies FilesArchived maintenance over two rounds: files older than the
 * retention period are deleted, sufficiently old files are compressed
 * (gaining a ".gz" suffix), and fresh files are left untouched.
 *
 * No manual cleanup is needed: the TemporaryFolder rule deletes the folder
 * when the test method finishes, even if the test fails, so the previous
 * try/finally with recursive deletion has been removed.
 */
public void testMaintenance() throws java.io.IOException {
    File tmpDir = temporaryFolder.newFolder();
    // Ages are given in hours; 35 and 34 days exceed the 30-day retention.
    makeLogfile(tmpDir, "2018/11/20/13-0", 35 * 24);
    makeLogfile(tmpDir, "2018/11/21/13-0", 34 * 24);
    makeLogfile(tmpDir, "2018/12/28/13-0", 3 * 24);
    makeLogfile(tmpDir, "2018/12/29/13-0", 2 * 24);
    makeLogfile(tmpDir, "2018/12/30/13-0", 1 * 24);
    makeLogfile(tmpDir, "2018/12/31/14-0", 3);
    makeLogfile(tmpDir, "2018/12/31/16-0", 1);
    makeLogfile(tmpDir, "2018/12/31/17-0", 0);
    dumpFiles(tmpDir, "before archive maintenance");
    // Constructing FilesArchived runs maintenance immediately.
    FilesArchived a = new FilesArchived(tmpDir);
    dumpFiles(tmpDir, "after archive maintenance");
    // Fresh files survive uncompressed.
    checkExist(tmpDir, "2018/12/31/17-0");
    checkExist(tmpDir, "2018/12/31/16-0");
    // Older files are compressed in place.
    checkExist(tmpDir, "2018/12/31/14-0.gz");
    checkExist(tmpDir, "2018/12/28/13-0.gz");
    checkExist(tmpDir, "2018/12/29/13-0.gz");
    checkExist(tmpDir, "2018/12/30/13-0.gz");
    checkNoExist(tmpDir, "2018/12/31/17-0.gz");
    checkNoExist(tmpDir, "2018/12/31/16-0.gz");
    checkNoExist(tmpDir, "2018/12/31/14-0");
    checkNoExist(tmpDir, "2018/12/28/13-0");
    checkNoExist(tmpDir, "2018/12/29/13-0");
    checkNoExist(tmpDir, "2018/12/30/13-0");
    // Files beyond the retention period are gone entirely.
    checkNoExist(tmpDir, "2018/11/20/13-0");
    checkNoExist(tmpDir, "2018/11/20/13-0.gz");
    checkNoExist(tmpDir, "2018/11/21/13-0");
    checkNoExist(tmpDir, "2018/11/21/13-0.gz");
    // Second round: age some files and add new generations.
    makeLogfile(tmpDir, "2018/12/31/16-0", 3);
    makeLogfile(tmpDir, "2018/12/31/17-0", 3);
    makeLogfile(tmpDir, "2018/12/31/17-1", 1);
    makeLogfile(tmpDir, "2018/12/31/17-2", 0);
    dumpFiles(tmpDir, "before second archive maintenance");
    a.maintenance();
    dumpFiles(tmpDir, "after second archive maintenance");
    checkExist(tmpDir, "2018/12/31/17-2");
    checkExist(tmpDir, "2018/12/31/17-1");
    checkExist(tmpDir, "2018/12/31/16-0.gz");
    checkExist(tmpDir, "2018/12/31/17-0.gz");
    checkNoExist(tmpDir, "2018/12/31/16-0");
    checkNoExist(tmpDir, "2018/12/31/17-0");
}
/**
 * Helpers for FilesArchived maintenance tests.
 */
class FilesArchivedTestCase {

    @Rule
    public TemporaryFolder temporaryFolder = new TemporaryFolder();

    /**
     * Creates a small log file under dir with the given relative name and a
     * modification time the given number of hours in the past.  The writer is
     * closed via try-with-resources so the content is flushed to disk before
     * the modification time is set (the previous code leaked the FileWriter).
     */
    private void makeLogfile(File dir, String name, long hours) throws IOException {
        File f = new File(dir, name);
        f.getParentFile().mkdirs();
        try (FileWriter writer = new FileWriter(f)) {
            writer.write("foo bar baz\n");
        }
        long now = System.currentTimeMillis();
        f.setLastModified(now - (hours * 3600 * 1000));
    }

    void checkExist(File dir, String name) {
        assertTrue(new File(dir, name).isFile());
    }

    void checkNoExist(File dir, String name) {
        assertFalse(new File(dir, name).isFile());
    }

    // Debugging helper, not a test: the misplaced @Test annotation was removed
    // (JUnit test methods must be public and take no parameters).
    private void dumpFiles(File dir, String header) {
        System.out.println(">>> " + header + " >>> :");
        List<String> seen = scanDir(dir);
        seen.sort(null);
        for (String s : seen) {
            System.err.println(" " + s);
        }
        System.out.println("<<< " + header + " <<<");
    }

    /** Recursively lists all regular files under the given directory. */
    private static List<String> scanDir(File top) {
        List<String> retval = new ArrayList<>();
        String[] names = top.list();
        if (names != null) {
            for (String name : names) {
                File sub = new File(top, name);
                if (sub.isFile()) {
                    retval.add(sub.toString());
                } else if (sub.isDirectory()) {
                    retval.addAll(scanDir(sub));
                }
            }
        }
        return retval;
    }
}
/**
 * Helpers for FilesArchived maintenance tests.
 */
class FilesArchivedTestCase {

    @Rule
    public TemporaryFolder temporaryFolder = new TemporaryFolder();

    /**
     * Creates a small log file under dir with the given relative name and a
     * modification time the given number of hours in the past.  The writer is
     * closed via try-with-resources so the content is flushed to disk before
     * the modification time is set (the previous code leaked the FileWriter).
     */
    private void makeLogfile(File dir, String name, long hours) throws IOException {
        File f = new File(dir, name);
        f.getParentFile().mkdirs();
        try (FileWriter writer = new FileWriter(f)) {
            writer.write("foo bar baz\n");
        }
        long now = System.currentTimeMillis();
        f.setLastModified(now - (hours * 3600 * 1000));
    }

    void checkExist(File dir, String name) {
        assertTrue(new File(dir, name).isFile());
    }

    void checkNoExist(File dir, String name) {
        assertFalse(new File(dir, name).isFile());
    }

    // Debugging helper, not a test: the misplaced @Test annotation was removed
    // (JUnit test methods must be public and take no parameters).
    private void dumpFiles(File dir, String header) {
        System.out.println(">>> " + header + " >>> :");
        List<String> seen = scanDir(dir);
        seen.sort(null);
        for (String s : seen) {
            System.err.println(" " + s);
        }
        System.out.println("<<< " + header + " <<<");
    }

    /** Recursively lists all regular files under the given directory. */
    private static List<String> scanDir(File top) {
        List<String> retval = new ArrayList<>();
        String[] names = top.list();
        if (names != null) {
            for (String name : names) {
                File sub = new File(top, name);
                if (sub.isFile()) {
                    retval.add(sub.toString());
                } else if (sub.isDirectory()) {
                    retval.addAll(scanDir(sub));
                }
            }
        }
        return retval;
    }
}
Isn't this still a race? Another thread can still update application/deployments after lock is released here, before `require` completes, but maybe not an issue in practice.
// Returns the application with the given id, first taking (and immediately releasing) its lock.
// NOTE(review): the no-op lockOrThrow only proves the application is lockable at this instant;
// the lock is released before require(id) reads, so a concurrent update can still slip in
// between — presumably acceptable for read-only callers, but TODO confirm.
private Application application(ApplicationId id) { controller.applications().lockOrThrow(id, __ -> { }); return controller.applications().require(id); }
controller.applications().lockOrThrow(id, __ -> { });
// Returns the application with the given id, first taking (and immediately releasing) its lock.
// NOTE(review): the no-op lockOrThrow only proves the application is lockable at this instant;
// the lock is released before require(id) reads, so a concurrent update can still slip in
// between — presumably acceptable for read-only callers, but TODO confirm.
private Application application(ApplicationId id) { controller.applications().lockOrThrow(id, __ -> { }); return controller.applications().require(id); }
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final Duration endpointTimeout = Duration.ofMinutes(15); static final Duration installationTimeout = Duration.ofMinutes(150); private final Controller controller; private final DeploymentFailureMails mails; public InternalStepRunner(Controller controller) { this.controller = controller; this.mails = new DeploymentFailureMails(controller.zoneRegistry()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case deployTester: return deployTester(id, logger); case installReal: return installReal(id, logger); case installTester: return installTester(id, logger); case startTests: return startTests(id, logger); case endTests: return endTests(id, logger); case copyVespaLogs: return copyVespaLogs(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.log(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected exception running " + id, e); if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + 
versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, versions, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, versions, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) { Optional<ApplicationPackage> applicationPackage = id.type().environment().isManuallyDeployed() ? Optional.of(new ApplicationPackage(controller.applications().applicationStore() .getDev(id.application(), id.type().zone(controller.system())))) : Optional.empty(); Optional<Version> vespaVersion = id.type().environment().isManuallyDeployed() ? Optional.of(versions.targetPlatform()) : Optional.empty(); return deploy(id.application(), id.type(), () -> controller.applications().deploy(id.application(), id.type().zone(controller.system()), applicationPackage, new DeployOptions(false, vespaVersion, false, setTheStage)), logger); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = controller.jobController().run(id).get().versions().targetPlatform(); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(id.tester().id(), id.type(), () -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), new DeployOptions(true, Optional.of(platform), false, false)), logger); } private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment, DualLogger logger) { try { PrepareResponse prepareResponse = deployment.get().prepareResponse(); if ( ! 
prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) { List<String> messages = new ArrayList<>(); messages.add("Deploy failed due to non-compatible changes that require re-feed."); messages.add("Your options are:"); messages.add("1. Revert the incompatible changes."); messages.add("2. If you think it is safe in your case, you can override this validation, see"); messages.add(" http: messages.add("3. Deploy as a new application under a different name."); messages.add("Illegal actions:"); prepareResponse.configChangeActions.refeedActions.stream() .filter(action -> ! action.allowed) .flatMap(action -> action.messages.stream()) .forEach(messages::add); messages.add("Details:"); prepareResponse.log.stream() .map(entry -> entry.message) .forEach(messages::add); logger.log(messages); return Optional.of(deploymentFailed); } if (prepareResponse.configChangeActions.restartActions.isEmpty()) logger.log("No services requiring restart."); else prepareResponse.configChangeActions.restartActions.stream() .flatMap(action -> action.services.stream()) .map(service -> service.hostName) .sorted().distinct() .map(Hostname::new) .forEach(hostname -> { controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname)); logger.log("Restarting services on host " + hostname.id() + "."); }); logger.log("Deployment successful."); return Optional.of(running); } catch (ConfigServerException e) { if ( e.getErrorCode() == OUT_OF_CAPACITY && type.isTest() || e.getErrorCode() == ACTIVATION_CONFLICT || e.getErrorCode() == APPLICATION_LOCK_FAILURE || e.getErrorCode() == PARENT_HOST_NOT_READY) { logger.log("Will retry, because of '" + e.getErrorCode() + "' deploying:\n" + e.getMessage()); return Optional.empty(); } if ( e.getErrorCode() == INVALID_APPLICATION_PACKAGE || e.getErrorCode() == BAD_REQUEST) { logger.log("Deployment failed: " + e.getMessage()); return Optional.of(deploymentFailed); } throw e; } } private 
Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! deployment.isPresent()) { logger.log(INFO, "Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); ApplicationVersion application = setTheStage ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication(); logger.log("Checking installation of " + platform + " and " + application.id() + " ..."); if ( nodesConverged(id.application(), id.type(), platform, logger) && servicesConverged(id.application(), id.type(), logger)) { if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } else if (timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!"); return Optional.of(error); } } if (timedOut(deployment.get(), installationTimeout)) { logger.log(INFO, "Installation failed to complete within " + installationTimeout.toMinutes() + " minutes!"); return Optional.of(installationFailed); } logger.log("Installation not yet complete."); return Optional.empty(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! 
deployment.isPresent()) { logger.log(WARNING, "Deployment expired before installation of tester was successful."); return Optional.of(error); } Version platform = controller.jobController().run(id).get().versions().targetPlatform(); logger.log("Checking installation of tester container ..."); if ( nodesConverged(id.tester().id(), id.type(), platform, logger) && servicesConverged(id.tester().id(), id.type(), logger)) { if (endpointsAvailable(id.tester().id(), id.type().zone(controller.system()), logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } else if (timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!"); return Optional.of(error); } } if (timedOut(deployment.get(), installationTimeout)) { logger.log(WARNING, "Installation of tester failed to complete within " + installationTimeout.toMinutes() + " minutes of real deployment!"); return Optional.of(error); } logger.log("Installation of tester not yet complete."); return Optional.empty(); } private boolean endpointsAvailable(ApplicationId id, ZoneId zoneId, DualLogger logger) { logger.log("Attempting to find deployment endpoints ..."); Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id, Set.of(zoneId)); if ( ! 
endpoints.containsKey(zoneId)) { logger.log("Endpoints not yet ready."); return false; } List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); endpoints.forEach((zone, uris) -> { messages.add("- " + zone); uris.forEach(uri -> messages.add(" |-- " + uri)); }); logger.log(messages); return true; } private boolean nodesConverged(ApplicationId id, JobType type, Version target, DualLogger logger) { List<Node> nodes = controller.configServer().nodeRepository().list(type.zone(controller.system()), id, ImmutableSet.of(active, reserved)); List<String> statuses = nodes.stream() .map(node -> String.format("%70s: %-16s%-25s%-32s%s", node.hostname(), node.serviceState(), node.wantedVersion() + (node.currentVersion().equals(node.wantedVersion()) ? "" : " <-- " + node.currentVersion()), node.restartGeneration() >= node.wantedRestartGeneration() ? "" : "restart pending (" + node.wantedRestartGeneration() + " <-- " + node.restartGeneration() + ")", node.rebootGeneration() >= node.wantedRebootGeneration() ? "" : "reboot pending (" + node.wantedRebootGeneration() + " <-- " + node.rebootGeneration() + ")")) .collect(Collectors.toList()); logger.log(statuses); return nodes.stream().allMatch(node -> node.currentVersion().equals(target) && node.restartGeneration() >= node.wantedRestartGeneration() && node.rebootGeneration() >= node.wantedRebootGeneration()); } private boolean servicesConverged(ApplicationId id, JobType type, DualLogger logger) { Optional<ServiceConvergence> convergence = controller.configServer().serviceConvergence(new DeploymentId(id, type.zone(controller.system()))); if ( ! 
convergence.isPresent()) { logger.log("Config status not currently available -- will retry."); return false; } logger.log("Wanted config generation is " + convergence.get().wantedGeneration()); List<String> statuses = convergence.get().services().stream() .filter(serviceStatus -> serviceStatus.currentGeneration() != convergence.get().wantedGeneration()) .map(serviceStatus -> String.format("%70s: %11s on port %4d has %s", serviceStatus.host().value(), serviceStatus.type(), serviceStatus.port(), serviceStatus.currentGeneration() == -1 ? "not started!" : Long.toString(serviceStatus.currentGeneration()))) .collect(Collectors.toList()); logger.log(statuses); if (statuses.isEmpty()) logger.log("All services on wanted config generation."); return convergence.get().converged(); } private Optional<RunStatus> startTests(RunId id, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! deployment.isPresent()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(aborted); } Set<ZoneId> zones = testedZoneAndProductionZones(id); logger.log("Attempting to find endpoints ..."); Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id.application(), zones); if ( ! 
endpoints.containsKey(id.type().zone(controller.system())) && timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!"); return Optional.of(error); } List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); endpoints.forEach((zone, uris) -> { messages.add("- " + zone); uris.forEach(uri -> messages.add(" |-- " + uri)); }); logger.log(messages); Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id); if (testerEndpoint.isEmpty() && timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!"); return Optional.of(error); } if (controller.jobController().cloud().ready(testerEndpoint.get())) { logger.log("Starting tests ..."); controller.jobController().cloud().startTests(testerEndpoint.get(), TesterCloud.Suite.of(id.type()), testConfig(id.application(), id.type().zone(controller.system()), controller.system(), endpoints, listClusters(id.application(), zones))); return Optional.of(running); } logger.log("Tester container not yet ready."); return Optional.empty(); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if ( ! deployment(id.application(), id.type()).isPresent()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id); if ( ! 
testerEndpoint.isPresent()) { logger.log("Endpoints for tester not found -- trying again later."); return Optional.empty(); } controller.jobController().updateTestLog(id); TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get()); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); return Optional.of(testFailure); case ERROR: logger.log(INFO, "Tester failed running its tests!"); return Optional.of(error); case SUCCESS: logger.log("Tests completed successfully."); return Optional.of(running); default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } } private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) { ZoneId zone = id.type().zone(controller.system()); if (deployment(id.application(), id.type()).isPresent()) try { logger.log("Copying Vespa log from nodes of " + id.application() + " in " + zone + " ..."); List<LogEntry> entries = new ArrayList<>(); String logs = IOUtils.readAll(controller.configServer().getLogs(new DeploymentId(id.application(), zone), Collections.emptyMap()), StandardCharsets.UTF_8); for (String line : logs.split("\n")) { String[] parts = line.split("\t"); if (parts.length != 7) continue; entries.add(new LogEntry(0, (long) (Double.parseDouble(parts[0]) * 1000), LogEntry.typeOf(LogLevel.parse(parts[5])), parts[1] + '\t' + parts[3] + '\t' + parts[4] + '\n' + parts[6].replaceAll("\\\\n", "\n") .replaceAll("\\\\t", "\t"))); } controller.jobController().log(id, Step.copyVespaLogs, entries); } catch (Exception e) { logger.log(INFO, "Failure getting vespa logs for " + id, e); return Optional.of(error); } return Optional.of(running); } private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { try { logger.log("Deactivating deployment of " + id.application() + " in " + 
id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting application " + id.application(), e); return Optional.of(error); } } private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { try { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting tester of " + id.application(), e); return Optional.of(error); } } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { JobReport report = JobReport.ofJob(run.id().application(), run.id().type(), run.id().number(), run.hasFailed() ? Optional.of(DeploymentJobs.JobError.unknown) : Optional.empty()); controller.applications().deploymentTrigger().notifyOfCompletion(report); if (run.hasFailed()) sendNotification(run, logger); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "'no longer supposed to run?:", e); return Optional.of(error); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ private void sendNotification(Run run, DualLogger logger) { Application application = controller.applications().require(run.id().application()); Notifications notifications = application.deploymentSpec().notifications(); boolean newCommit = application.change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? 
failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { if (run.status() == outOfCapacity && run.id().type().isProduction()) controller.mailer().send(mails.outOfCapacity(run.id(), recipients)); if (run.status() == deploymentFailed) controller.mailer().send(mails.deploymentFailure(run.id(), recipients)); if (run.status() == installationFailed) controller.mailer().send(mails.installationFailure(run.id(), recipients)); if (run.status() == testFailure) controller.mailer().send(mails.testFailure(run.id(), recipients)); if (run.status() == error) controller.mailer().send(mails.systemError(run.id(), recipients)); } catch (RuntimeException e) { logger.log(INFO, "Exception trying to send mail for " + run.id(), e); } } /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ /** * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout. * * We time out the job before the deployment expires, for zone where deployments are not persistent, * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry, * and the given default installation timeout, minus one minute, is used as a timeout threshold. 
*/ private boolean timedOut(Deployment deployment, Duration defaultTimeout) { Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone()) .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0) .orElse(defaultTimeout); return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1)))); } /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); DeploymentSpec spec = controller.applications().require(id.application()).deploymentSpec(); byte[] servicesXml = servicesXml(controller.system(), testerFlavorFor(id, spec)); byte[] testPackage = controller.applications().applicationStore().get(id.tester(), version); ZoneId zone = id.type().zone(controller.system()); byte[] deploymentXml = deploymentXml(spec.athenzDomain(), spec.athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } } private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) { for (DeploymentSpec.Step step : spec.steps()) if (step.deploysTo(id.type().environment())) return step.zones().get(0).testerFlavor(); throw new IllegalStateException("No step deploys to the zone this run is for!"); } /** Returns a stream containing the zone of the deployment tested in the given run, and all production zones for the application. 
*/ private Set<ZoneId> testedZoneAndProductionZones(RunId id) { return Stream.concat(Stream.of(id.type().zone(controller.system())), application(id.application()).productionDeployments().keySet().stream()) .collect(Collectors.toSet()); } /** Returns all endpoints for all current deployments of the given real application. */ private Map<ZoneId, List<URI>> deploymentEndpoints(ApplicationId id, Iterable<ZoneId> zones) { ImmutableMap.Builder<ZoneId, List<URI>> deployments = ImmutableMap.builder(); for (ZoneId zone : zones) { controller.applications().getDeploymentEndpoints(new DeploymentId(id, zone)) .filter(endpoints -> ! endpoints.isEmpty()) .or(() -> Optional.of(controller.applications().routingPolicies(new DeploymentId(id, zone)).stream() .map(policy -> policy.endpointIn(controller.system()).url()) .collect(Collectors.toUnmodifiableList())) .filter(endpoints -> ! endpoints.isEmpty())) .ifPresent(endpoints -> deployments.put(zone, endpoints)); } return deployments.build(); } /** Returns all content clusters in all current deployments of the given real application. */ private Map<ZoneId, List<String>> listClusters(ApplicationId id, Iterable<ZoneId> zones) { ImmutableMap.Builder<ZoneId, List<String>> clusters = ImmutableMap.builder(); for (ZoneId zone : zones) clusters.put(zone, ImmutableList.copyOf(controller.configServer().getContentClusters(new DeploymentId(id, zone)))); return clusters.build(); } /** Returns the generated services.xml content for the tester application. */ static byte[] servicesXml(SystemName systemName, Optional<String> testerFlavor) { String domain = systemName == SystemName.main ? 
"vespa.vespa" : "vespa.vespa.cd"; String flavor = testerFlavor.orElse("d-1-4-50"); int memoryGb = Integer.parseInt(flavor.split("-")[2]); int jdiscMemoryPercentage = (int) Math.ceil(200.0 / memoryGb); int testMemoryMb = 512 * (memoryGb - 2); String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='tester'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" + " <binding>http: " </handler>\n" + "\n" + " <http>\n" + " <server id='default' port='4080'/>\n" + " <filtering>\n" + " <access-control domain='" + domain + "'>\n" + " <exclude>\n" + " <binding>http: " </exclude>\n" + " </access-control>\n" + " <request-chain id=\"testrunner-api\">\n" + " <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" + " <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" + " <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" + " <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" + " </config>\n" + " <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" + " <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" + " <resourceName>" + domain + ":tester-application</resourceName>\n" + " <action>deploy</action>\n" + " </config>\n" + " </component>\n" + " </filter>\n" + " </request-chain>\n" + " </filtering>\n" + " </http>\n" + "\n" + " <nodes count=\"1\" 
flavor=\"" + flavor + "\" allocated-memory=\"" + jdiscMemoryPercentage + "%\" />\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(StandardCharsets.UTF_8); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + "/>"; return deploymentSpec.getBytes(StandardCharsets.UTF_8); } /** Returns the config for the tests to run for the given job. */ private static byte[] testConfig(ApplicationId id, ZoneId testerZone, SystemName system, Map<ZoneId, List<URI>> deployments, Map<ZoneId, List<String>> clusters) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("application", id.serializedForm()); root.setString("zone", testerZone.value()); root.setString("system", system.value()); Cursor endpointsObject = root.setObject("endpoints"); deployments.forEach((zone, endpoints) -> { Cursor endpointArray = endpointsObject.setArray(zone.value()); for (URI endpoint : endpoints) endpointArray.addString(endpoint.toString()); }); Cursor clustersObject = root.setObject("clusters"); clusters.forEach((zone, clusterList) -> { Cursor clusterArray = clustersObject.setArray(zone.value()); for (String cluster : clusterList) clusterArray.addString(cluster); }); try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. 
*/ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... messages) { log(List.of(messages)); } private void log(List<String> messages) { controller.jobController().log(id, step, DEBUG, messages); } private void log(Level level, String message) { log(level, message, null); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } }
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final Duration endpointTimeout = Duration.ofMinutes(15); static final Duration installationTimeout = Duration.ofMinutes(150); private final Controller controller; private final DeploymentFailureMails mails; public InternalStepRunner(Controller controller) { this.controller = controller; this.mails = new DeploymentFailureMails(controller.zoneRegistry()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case deployTester: return deployTester(id, logger); case installReal: return installReal(id, logger); case installTester: return installTester(id, logger); case startTests: return startTests(id, logger); case endTests: return endTests(id, logger); case copyVespaLogs: return copyVespaLogs(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.log(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected exception running " + id, e); if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + 
versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, versions, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, versions, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) { Optional<ApplicationPackage> applicationPackage = id.type().environment().isManuallyDeployed() ? Optional.of(new ApplicationPackage(controller.applications().applicationStore() .getDev(id.application(), id.type().zone(controller.system())))) : Optional.empty(); Optional<Version> vespaVersion = id.type().environment().isManuallyDeployed() ? Optional.of(versions.targetPlatform()) : Optional.empty(); return deploy(id.application(), id.type(), () -> controller.applications().deploy(id.application(), id.type().zone(controller.system()), applicationPackage, new DeployOptions(false, vespaVersion, false, setTheStage)), logger); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = controller.jobController().run(id).get().versions().targetPlatform(); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(id.tester().id(), id.type(), () -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), new DeployOptions(true, Optional.of(platform), false, false)), logger); } private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment, DualLogger logger) { try { PrepareResponse prepareResponse = deployment.get().prepareResponse(); if ( ! 
prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) { List<String> messages = new ArrayList<>(); messages.add("Deploy failed due to non-compatible changes that require re-feed."); messages.add("Your options are:"); messages.add("1. Revert the incompatible changes."); messages.add("2. If you think it is safe in your case, you can override this validation, see"); messages.add(" http: messages.add("3. Deploy as a new application under a different name."); messages.add("Illegal actions:"); prepareResponse.configChangeActions.refeedActions.stream() .filter(action -> ! action.allowed) .flatMap(action -> action.messages.stream()) .forEach(messages::add); messages.add("Details:"); prepareResponse.log.stream() .map(entry -> entry.message) .forEach(messages::add); logger.log(messages); return Optional.of(deploymentFailed); } if (prepareResponse.configChangeActions.restartActions.isEmpty()) logger.log("No services requiring restart."); else prepareResponse.configChangeActions.restartActions.stream() .flatMap(action -> action.services.stream()) .map(service -> service.hostName) .sorted().distinct() .map(Hostname::new) .forEach(hostname -> { controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname)); logger.log("Restarting services on host " + hostname.id() + "."); }); logger.log("Deployment successful."); return Optional.of(running); } catch (ConfigServerException e) { if ( e.getErrorCode() == OUT_OF_CAPACITY && type.isTest() || e.getErrorCode() == ACTIVATION_CONFLICT || e.getErrorCode() == APPLICATION_LOCK_FAILURE || e.getErrorCode() == PARENT_HOST_NOT_READY) { logger.log("Will retry, because of '" + e.getErrorCode() + "' deploying:\n" + e.getMessage()); return Optional.empty(); } if ( e.getErrorCode() == INVALID_APPLICATION_PACKAGE || e.getErrorCode() == BAD_REQUEST) { logger.log("Deployment failed: " + e.getMessage()); return Optional.of(deploymentFailed); } throw e; } } private 
Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) {
        return installReal(id, true, logger);
    }

    private Optional<RunStatus> installReal(RunId id, DualLogger logger) {
        return installReal(id, false, logger);
    }

    /**
     * Checks whether the real application is installed on the target (or source, when setting the stage)
     * platform and application versions, and whether its endpoints have shown up, within the allowed time.
     */
    private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) {
        Optional<Deployment> deployment = deployment(id.application(), id.type());
        if (deployment.isEmpty()) {
            logger.log(INFO, "Deployment expired before installation was successful.");
            return Optional.of(installationFailed);
        }

        Versions versions = controller.jobController().run(id).get().versions();
        Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform())
                                       : versions.targetPlatform();
        ApplicationVersion application = setTheStage ? versions.sourceApplication().orElse(versions.targetApplication())
                                                     : versions.targetApplication();
        logger.log("Checking installation of " + platform + " and " + application.id() + " ...");
        if (   nodesConverged(id.application(), id.type(), platform, logger)
            && servicesConverged(id.application(), id.type(), logger)) {
            if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) {
                logger.log("Installation succeeded!");
                return Optional.of(running);
            }
            else if (timedOut(deployment.get(), endpointTimeout)) {
                logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
                return Optional.of(error);
            }
        }
        if (timedOut(deployment.get(), installationTimeout)) {
            logger.log(INFO, "Installation failed to complete within " + installationTimeout.toMinutes() + " minutes!");
            return Optional.of(installationFailed);
        }
        logger.log("Installation not yet complete.");
        return Optional.empty();
    }

    /** Checks whether the tester container is installed and its endpoint is up, within the allowed time. */
    private Optional<RunStatus> installTester(RunId id, DualLogger logger) {
        Optional<Deployment> deployment = deployment(id.application(), id.type());
        if (deployment.isEmpty()) {
            logger.log(WARNING, "Deployment expired before installation of tester was successful.");
            return Optional.of(error);
        }

        Version platform = controller.jobController().run(id).get().versions().targetPlatform();
        logger.log("Checking installation of tester container ...");
        if (   nodesConverged(id.tester().id(), id.type(), platform, logger)
            && servicesConverged(id.tester().id(), id.type(), logger)) {
            if (endpointsAvailable(id.tester().id(), id.type().zone(controller.system()), logger)) {
                logger.log("Tester container successfully installed!");
                return Optional.of(running);
            }
            else if (timedOut(deployment.get(), endpointTimeout)) {
                logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!");
                return Optional.of(error);
            }
        }
        if (timedOut(deployment.get(), installationTimeout)) {
            logger.log(WARNING, "Installation of tester failed to complete within " + installationTimeout.toMinutes() + " minutes of real deployment!");
            return Optional.of(error);
        }
        logger.log("Installation of tester not yet complete.");
        return Optional.empty();
    }

    /** Returns whether endpoints have appeared for the deployment in the given zone, logging what was found. */
    private boolean endpointsAvailable(ApplicationId id, ZoneId zoneId, DualLogger logger) {
        logger.log("Attempting to find deployment endpoints ...");
        Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id, Set.of(zoneId));
        if ( ! 
endpoints.containsKey(zoneId)) { logger.log("Endpoints not yet ready."); return false; } List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); endpoints.forEach((zone, uris) -> { messages.add("- " + zone); uris.forEach(uri -> messages.add(" |-- " + uri)); }); logger.log(messages); return true; } private boolean nodesConverged(ApplicationId id, JobType type, Version target, DualLogger logger) { List<Node> nodes = controller.configServer().nodeRepository().list(type.zone(controller.system()), id, ImmutableSet.of(active, reserved)); List<String> statuses = nodes.stream() .map(node -> String.format("%70s: %-16s%-25s%-32s%s", node.hostname(), node.serviceState(), node.wantedVersion() + (node.currentVersion().equals(node.wantedVersion()) ? "" : " <-- " + node.currentVersion()), node.restartGeneration() >= node.wantedRestartGeneration() ? "" : "restart pending (" + node.wantedRestartGeneration() + " <-- " + node.restartGeneration() + ")", node.rebootGeneration() >= node.wantedRebootGeneration() ? "" : "reboot pending (" + node.wantedRebootGeneration() + " <-- " + node.rebootGeneration() + ")")) .collect(Collectors.toList()); logger.log(statuses); return nodes.stream().allMatch(node -> node.currentVersion().equals(target) && node.restartGeneration() >= node.wantedRestartGeneration() && node.rebootGeneration() >= node.wantedRebootGeneration()); } private boolean servicesConverged(ApplicationId id, JobType type, DualLogger logger) { Optional<ServiceConvergence> convergence = controller.configServer().serviceConvergence(new DeploymentId(id, type.zone(controller.system()))); if ( ! 
convergence.isPresent()) { logger.log("Config status not currently available -- will retry."); return false; } logger.log("Wanted config generation is " + convergence.get().wantedGeneration()); List<String> statuses = convergence.get().services().stream() .filter(serviceStatus -> serviceStatus.currentGeneration() != convergence.get().wantedGeneration()) .map(serviceStatus -> String.format("%70s: %11s on port %4d has %s", serviceStatus.host().value(), serviceStatus.type(), serviceStatus.port(), serviceStatus.currentGeneration() == -1 ? "not started!" : Long.toString(serviceStatus.currentGeneration()))) .collect(Collectors.toList()); logger.log(statuses); if (statuses.isEmpty()) logger.log("All services on wanted config generation."); return convergence.get().converged(); } private Optional<RunStatus> startTests(RunId id, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! deployment.isPresent()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(aborted); } Set<ZoneId> zones = testedZoneAndProductionZones(id); logger.log("Attempting to find endpoints ..."); Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id.application(), zones); if ( ! 
endpoints.containsKey(id.type().zone(controller.system())) && timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!"); return Optional.of(error); } List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); endpoints.forEach((zone, uris) -> { messages.add("- " + zone); uris.forEach(uri -> messages.add(" |-- " + uri)); }); logger.log(messages); Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id); if (testerEndpoint.isEmpty() && timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!"); return Optional.of(error); } if (controller.jobController().cloud().ready(testerEndpoint.get())) { logger.log("Starting tests ..."); controller.jobController().cloud().startTests(testerEndpoint.get(), TesterCloud.Suite.of(id.type()), testConfig(id.application(), id.type().zone(controller.system()), controller.system(), endpoints, listClusters(id.application(), zones))); return Optional.of(running); } logger.log("Tester container not yet ready."); return Optional.empty(); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if ( ! deployment(id.application(), id.type()).isPresent()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id); if ( ! 
testerEndpoint.isPresent()) { logger.log("Endpoints for tester not found -- trying again later."); return Optional.empty(); } controller.jobController().updateTestLog(id); TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get()); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); return Optional.of(testFailure); case ERROR: logger.log(INFO, "Tester failed running its tests!"); return Optional.of(error); case SUCCESS: logger.log("Tests completed successfully."); return Optional.of(running); default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } } private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) { ZoneId zone = id.type().zone(controller.system()); if (deployment(id.application(), id.type()).isPresent()) try { logger.log("Copying Vespa log from nodes of " + id.application() + " in " + zone + " ..."); List<LogEntry> entries = new ArrayList<>(); String logs = IOUtils.readAll(controller.configServer().getLogs(new DeploymentId(id.application(), zone), Collections.emptyMap()), StandardCharsets.UTF_8); for (String line : logs.split("\n")) { String[] parts = line.split("\t"); if (parts.length != 7) continue; entries.add(new LogEntry(0, (long) (Double.parseDouble(parts[0]) * 1000), LogEntry.typeOf(LogLevel.parse(parts[5])), parts[1] + '\t' + parts[3] + '\t' + parts[4] + '\n' + parts[6].replaceAll("\\\\n", "\n") .replaceAll("\\\\t", "\t"))); } controller.jobController().log(id, Step.copyVespaLogs, entries); } catch (Exception e) { logger.log(INFO, "Failure getting vespa logs for " + id, e); return Optional.of(error); } return Optional.of(running); } private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { try { logger.log("Deactivating deployment of " + id.application() + " in " + 
id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting application " + id.application(), e); return Optional.of(error); } } private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { try { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting tester of " + id.application(), e); return Optional.of(error); } } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { JobReport report = JobReport.ofJob(run.id().application(), run.id().type(), run.id().number(), run.hasFailed() ? Optional.of(DeploymentJobs.JobError.unknown) : Optional.empty()); controller.applications().deploymentTrigger().notifyOfCompletion(report); if (run.hasFailed()) sendNotification(run, logger); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "'no longer supposed to run?:", e); return Optional.of(error); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ private void sendNotification(Run run, DualLogger logger) { Application application = controller.applications().require(run.id().application()); Notifications notifications = application.deploymentSpec().notifications(); boolean newCommit = application.change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? 
failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { if (run.status() == outOfCapacity && run.id().type().isProduction()) controller.mailer().send(mails.outOfCapacity(run.id(), recipients)); if (run.status() == deploymentFailed) controller.mailer().send(mails.deploymentFailure(run.id(), recipients)); if (run.status() == installationFailed) controller.mailer().send(mails.installationFailure(run.id(), recipients)); if (run.status() == testFailure) controller.mailer().send(mails.testFailure(run.id(), recipients)); if (run.status() == error) controller.mailer().send(mails.systemError(run.id(), recipients)); } catch (RuntimeException e) { logger.log(INFO, "Exception trying to send mail for " + run.id(), e); } } /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ /** * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout. * * We time out the job before the deployment expires, for zone where deployments are not persistent, * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry, * and the given default installation timeout, minus one minute, is used as a timeout threshold. 
*/ private boolean timedOut(Deployment deployment, Duration defaultTimeout) { Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone()) .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0) .orElse(defaultTimeout); return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1)))); } /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); DeploymentSpec spec = controller.applications().require(id.application()).deploymentSpec(); byte[] servicesXml = servicesXml(controller.system(), testerFlavorFor(id, spec)); byte[] testPackage = controller.applications().applicationStore().get(id.tester(), version); ZoneId zone = id.type().zone(controller.system()); byte[] deploymentXml = deploymentXml(spec.athenzDomain(), spec.athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } } private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) { for (DeploymentSpec.Step step : spec.steps()) if (step.deploysTo(id.type().environment())) return step.zones().get(0).testerFlavor(); throw new IllegalStateException("No step deploys to the zone this run is for!"); } /** Returns a stream containing the zone of the deployment tested in the given run, and all production zones for the application. 
*/ private Set<ZoneId> testedZoneAndProductionZones(RunId id) { return Stream.concat(Stream.of(id.type().zone(controller.system())), application(id.application()).productionDeployments().keySet().stream()) .collect(Collectors.toSet()); } /** Returns all endpoints for all current deployments of the given real application. */ private Map<ZoneId, List<URI>> deploymentEndpoints(ApplicationId id, Iterable<ZoneId> zones) { ImmutableMap.Builder<ZoneId, List<URI>> deployments = ImmutableMap.builder(); for (ZoneId zone : zones) { controller.applications().getDeploymentEndpoints(new DeploymentId(id, zone)) .filter(endpoints -> ! endpoints.isEmpty()) .or(() -> Optional.of(controller.applications().routingPolicies(new DeploymentId(id, zone)).stream() .map(policy -> policy.endpointIn(controller.system()).url()) .collect(Collectors.toUnmodifiableList())) .filter(endpoints -> ! endpoints.isEmpty())) .ifPresent(endpoints -> deployments.put(zone, endpoints)); } return deployments.build(); } /** Returns all content clusters in all current deployments of the given real application. */ private Map<ZoneId, List<String>> listClusters(ApplicationId id, Iterable<ZoneId> zones) { ImmutableMap.Builder<ZoneId, List<String>> clusters = ImmutableMap.builder(); for (ZoneId zone : zones) clusters.put(zone, ImmutableList.copyOf(controller.configServer().getContentClusters(new DeploymentId(id, zone)))); return clusters.build(); } /** Returns the generated services.xml content for the tester application. */ static byte[] servicesXml(SystemName systemName, Optional<String> testerFlavor) { String domain = systemName == SystemName.main ? 
"vespa.vespa" : "vespa.vespa.cd"; String flavor = testerFlavor.orElse("d-1-4-50"); int memoryGb = Integer.parseInt(flavor.split("-")[2]); int jdiscMemoryPercentage = (int) Math.ceil(200.0 / memoryGb); int testMemoryMb = 512 * (memoryGb - 2); String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='tester'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" + " <binding>http: " </handler>\n" + "\n" + " <http>\n" + " <server id='default' port='4080'/>\n" + " <filtering>\n" + " <access-control domain='" + domain + "'>\n" + " <exclude>\n" + " <binding>http: " </exclude>\n" + " </access-control>\n" + " <request-chain id=\"testrunner-api\">\n" + " <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" + " <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" + " <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" + " <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" + " </config>\n" + " <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" + " <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" + " <resourceName>" + domain + ":tester-application</resourceName>\n" + " <action>deploy</action>\n" + " </config>\n" + " </component>\n" + " </filter>\n" + " </request-chain>\n" + " </filtering>\n" + " </http>\n" + "\n" + " <nodes count=\"1\" 
flavor=\"" + flavor + "\" allocated-memory=\"" + jdiscMemoryPercentage + "%\" />\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(StandardCharsets.UTF_8); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + "/>"; return deploymentSpec.getBytes(StandardCharsets.UTF_8); } /** Returns the config for the tests to run for the given job. */ private static byte[] testConfig(ApplicationId id, ZoneId testerZone, SystemName system, Map<ZoneId, List<URI>> deployments, Map<ZoneId, List<String>> clusters) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("application", id.serializedForm()); root.setString("zone", testerZone.value()); root.setString("system", system.value()); Cursor endpointsObject = root.setObject("endpoints"); deployments.forEach((zone, endpoints) -> { Cursor endpointArray = endpointsObject.setArray(zone.value()); for (URI endpoint : endpoints) endpointArray.addString(endpoint.toString()); }); Cursor clustersObject = root.setObject("clusters"); clusters.forEach((zone, clusterList) -> { Cursor clusterArray = clustersObject.setArray(zone.value()); for (String cluster : clusterList) clusterArray.addString(cluster); }); try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. 
*/
private class DualLogger {

    private final RunId id;
    private final Step step;

    private DualLogger(RunId id, Step step) {
        this.id = id;
        this.step = step;
    }

    // Debug-level convenience overloads, forwarded to the job controller's run log.
    private void log(String... messages) {
        log(List.of(messages));
    }

    private void log(List<String> messages) {
        controller.jobController().log(id, step, DEBUG, messages);
    }

    private void log(Level level, String message) {
        log(level, message, null);
    }

    /** Logs to the outer class' logger and to the job controller's run log, appending any stack trace to the latter. */
    private void log(Level level, String message, Throwable thrown) {
        logger.log(level, id + " at " + step + ": " + message, thrown);
        if (thrown != null) {
            ByteArrayOutputStream trace = new ByteArrayOutputStream();
            thrown.printStackTrace(new PrintStream(trace));
            message += "\n" + trace;
        }
        controller.jobController().log(id, step, level, message);
    }

}

}
// Correct about everything :)
// Returns the real application with the given id.
// NOTE(review): the empty lockOrThrow body appears to serve only to block until any
// in-flight modification of the application completes; require(id) then reads outside
// the lock — confirm this read-after-unlock window is acceptable to callers.
private Application application(ApplicationId id) { controller.applications().lockOrThrow(id, __ -> { }); return controller.applications().require(id); }
// NOTE(review): stray duplicate of the lockOrThrow call from application() — a bare
// statement is not valid at class scope; this looks like a paste artifact. TODO: remove.
controller.applications().lockOrThrow(id, __ -> { });
/** Returns the real application with the given id. */
private Application application(ApplicationId id) {
    // Wait for any concurrent modification of the application to complete before reading it.
    controller.applications().lockOrThrow(id, __ -> { });
    return controller.applications().require(id);
}
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final Duration endpointTimeout = Duration.ofMinutes(15); static final Duration installationTimeout = Duration.ofMinutes(150); private final Controller controller; private final DeploymentFailureMails mails; public InternalStepRunner(Controller controller) { this.controller = controller; this.mails = new DeploymentFailureMails(controller.zoneRegistry()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case deployTester: return deployTester(id, logger); case installReal: return installReal(id, logger); case installTester: return installTester(id, logger); case startTests: return startTests(id, logger); case endTests: return endTests(id, logger); case copyVespaLogs: return copyVespaLogs(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.log(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected exception running " + id, e); if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + 
versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, versions, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, versions, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) { Optional<ApplicationPackage> applicationPackage = id.type().environment().isManuallyDeployed() ? Optional.of(new ApplicationPackage(controller.applications().applicationStore() .getDev(id.application(), id.type().zone(controller.system())))) : Optional.empty(); Optional<Version> vespaVersion = id.type().environment().isManuallyDeployed() ? Optional.of(versions.targetPlatform()) : Optional.empty(); return deploy(id.application(), id.type(), () -> controller.applications().deploy(id.application(), id.type().zone(controller.system()), applicationPackage, new DeployOptions(false, vespaVersion, false, setTheStage)), logger); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = controller.jobController().run(id).get().versions().targetPlatform(); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(id.tester().id(), id.type(), () -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), new DeployOptions(true, Optional.of(platform), false, false)), logger); } private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment, DualLogger logger) { try { PrepareResponse prepareResponse = deployment.get().prepareResponse(); if ( ! 
prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) { List<String> messages = new ArrayList<>(); messages.add("Deploy failed due to non-compatible changes that require re-feed."); messages.add("Your options are:"); messages.add("1. Revert the incompatible changes."); messages.add("2. If you think it is safe in your case, you can override this validation, see"); messages.add(" http: messages.add("3. Deploy as a new application under a different name."); messages.add("Illegal actions:"); prepareResponse.configChangeActions.refeedActions.stream() .filter(action -> ! action.allowed) .flatMap(action -> action.messages.stream()) .forEach(messages::add); messages.add("Details:"); prepareResponse.log.stream() .map(entry -> entry.message) .forEach(messages::add); logger.log(messages); return Optional.of(deploymentFailed); } if (prepareResponse.configChangeActions.restartActions.isEmpty()) logger.log("No services requiring restart."); else prepareResponse.configChangeActions.restartActions.stream() .flatMap(action -> action.services.stream()) .map(service -> service.hostName) .sorted().distinct() .map(Hostname::new) .forEach(hostname -> { controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname)); logger.log("Restarting services on host " + hostname.id() + "."); }); logger.log("Deployment successful."); return Optional.of(running); } catch (ConfigServerException e) { if ( e.getErrorCode() == OUT_OF_CAPACITY && type.isTest() || e.getErrorCode() == ACTIVATION_CONFLICT || e.getErrorCode() == APPLICATION_LOCK_FAILURE || e.getErrorCode() == PARENT_HOST_NOT_READY) { logger.log("Will retry, because of '" + e.getErrorCode() + "' deploying:\n" + e.getMessage()); return Optional.empty(); } if ( e.getErrorCode() == INVALID_APPLICATION_PACKAGE || e.getErrorCode() == BAD_REQUEST) { logger.log("Deployment failed: " + e.getMessage()); return Optional.of(deploymentFailed); } throw e; } } private 
Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! deployment.isPresent()) { logger.log(INFO, "Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); ApplicationVersion application = setTheStage ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication(); logger.log("Checking installation of " + platform + " and " + application.id() + " ..."); if ( nodesConverged(id.application(), id.type(), platform, logger) && servicesConverged(id.application(), id.type(), logger)) { if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } else if (timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!"); return Optional.of(error); } } if (timedOut(deployment.get(), installationTimeout)) { logger.log(INFO, "Installation failed to complete within " + installationTimeout.toMinutes() + " minutes!"); return Optional.of(installationFailed); } logger.log("Installation not yet complete."); return Optional.empty(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! 
deployment.isPresent()) { logger.log(WARNING, "Deployment expired before installation of tester was successful."); return Optional.of(error); } Version platform = controller.jobController().run(id).get().versions().targetPlatform(); logger.log("Checking installation of tester container ..."); if ( nodesConverged(id.tester().id(), id.type(), platform, logger) && servicesConverged(id.tester().id(), id.type(), logger)) { if (endpointsAvailable(id.tester().id(), id.type().zone(controller.system()), logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } else if (timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!"); return Optional.of(error); } } if (timedOut(deployment.get(), installationTimeout)) { logger.log(WARNING, "Installation of tester failed to complete within " + installationTimeout.toMinutes() + " minutes of real deployment!"); return Optional.of(error); } logger.log("Installation of tester not yet complete."); return Optional.empty(); } private boolean endpointsAvailable(ApplicationId id, ZoneId zoneId, DualLogger logger) { logger.log("Attempting to find deployment endpoints ..."); Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id, Set.of(zoneId)); if ( ! 
endpoints.containsKey(zoneId)) { logger.log("Endpoints not yet ready."); return false; } List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); endpoints.forEach((zone, uris) -> { messages.add("- " + zone); uris.forEach(uri -> messages.add(" |-- " + uri)); }); logger.log(messages); return true; } private boolean nodesConverged(ApplicationId id, JobType type, Version target, DualLogger logger) { List<Node> nodes = controller.configServer().nodeRepository().list(type.zone(controller.system()), id, ImmutableSet.of(active, reserved)); List<String> statuses = nodes.stream() .map(node -> String.format("%70s: %-16s%-25s%-32s%s", node.hostname(), node.serviceState(), node.wantedVersion() + (node.currentVersion().equals(node.wantedVersion()) ? "" : " <-- " + node.currentVersion()), node.restartGeneration() >= node.wantedRestartGeneration() ? "" : "restart pending (" + node.wantedRestartGeneration() + " <-- " + node.restartGeneration() + ")", node.rebootGeneration() >= node.wantedRebootGeneration() ? "" : "reboot pending (" + node.wantedRebootGeneration() + " <-- " + node.rebootGeneration() + ")")) .collect(Collectors.toList()); logger.log(statuses); return nodes.stream().allMatch(node -> node.currentVersion().equals(target) && node.restartGeneration() >= node.wantedRestartGeneration() && node.rebootGeneration() >= node.wantedRebootGeneration()); } private boolean servicesConverged(ApplicationId id, JobType type, DualLogger logger) { Optional<ServiceConvergence> convergence = controller.configServer().serviceConvergence(new DeploymentId(id, type.zone(controller.system()))); if ( ! 
convergence.isPresent()) { logger.log("Config status not currently available -- will retry."); return false; } logger.log("Wanted config generation is " + convergence.get().wantedGeneration()); List<String> statuses = convergence.get().services().stream() .filter(serviceStatus -> serviceStatus.currentGeneration() != convergence.get().wantedGeneration()) .map(serviceStatus -> String.format("%70s: %11s on port %4d has %s", serviceStatus.host().value(), serviceStatus.type(), serviceStatus.port(), serviceStatus.currentGeneration() == -1 ? "not started!" : Long.toString(serviceStatus.currentGeneration()))) .collect(Collectors.toList()); logger.log(statuses); if (statuses.isEmpty()) logger.log("All services on wanted config generation."); return convergence.get().converged(); } private Optional<RunStatus> startTests(RunId id, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! deployment.isPresent()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(aborted); } Set<ZoneId> zones = testedZoneAndProductionZones(id); logger.log("Attempting to find endpoints ..."); Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id.application(), zones); if ( ! 
endpoints.containsKey(id.type().zone(controller.system())) && timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!"); return Optional.of(error); } List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); endpoints.forEach((zone, uris) -> { messages.add("- " + zone); uris.forEach(uri -> messages.add(" |-- " + uri)); }); logger.log(messages); Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id); if (testerEndpoint.isEmpty() && timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!"); return Optional.of(error); } if (controller.jobController().cloud().ready(testerEndpoint.get())) { logger.log("Starting tests ..."); controller.jobController().cloud().startTests(testerEndpoint.get(), TesterCloud.Suite.of(id.type()), testConfig(id.application(), id.type().zone(controller.system()), controller.system(), endpoints, listClusters(id.application(), zones))); return Optional.of(running); } logger.log("Tester container not yet ready."); return Optional.empty(); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if ( ! deployment(id.application(), id.type()).isPresent()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id); if ( ! 
testerEndpoint.isPresent()) { logger.log("Endpoints for tester not found -- trying again later."); return Optional.empty(); } controller.jobController().updateTestLog(id); TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get()); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); return Optional.of(testFailure); case ERROR: logger.log(INFO, "Tester failed running its tests!"); return Optional.of(error); case SUCCESS: logger.log("Tests completed successfully."); return Optional.of(running); default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } } private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) { ZoneId zone = id.type().zone(controller.system()); if (deployment(id.application(), id.type()).isPresent()) try { logger.log("Copying Vespa log from nodes of " + id.application() + " in " + zone + " ..."); List<LogEntry> entries = new ArrayList<>(); String logs = IOUtils.readAll(controller.configServer().getLogs(new DeploymentId(id.application(), zone), Collections.emptyMap()), StandardCharsets.UTF_8); for (String line : logs.split("\n")) { String[] parts = line.split("\t"); if (parts.length != 7) continue; entries.add(new LogEntry(0, (long) (Double.parseDouble(parts[0]) * 1000), LogEntry.typeOf(LogLevel.parse(parts[5])), parts[1] + '\t' + parts[3] + '\t' + parts[4] + '\n' + parts[6].replaceAll("\\\\n", "\n") .replaceAll("\\\\t", "\t"))); } controller.jobController().log(id, Step.copyVespaLogs, entries); } catch (Exception e) { logger.log(INFO, "Failure getting vespa logs for " + id, e); return Optional.of(error); } return Optional.of(running); } private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { try { logger.log("Deactivating deployment of " + id.application() + " in " + 
id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting application " + id.application(), e); return Optional.of(error); } } private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { try { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting tester of " + id.application(), e); return Optional.of(error); } } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { JobReport report = JobReport.ofJob(run.id().application(), run.id().type(), run.id().number(), run.hasFailed() ? Optional.of(DeploymentJobs.JobError.unknown) : Optional.empty()); controller.applications().deploymentTrigger().notifyOfCompletion(report); if (run.hasFailed()) sendNotification(run, logger); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "'no longer supposed to run?:", e); return Optional.of(error); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ private void sendNotification(Run run, DualLogger logger) { Application application = controller.applications().require(run.id().application()); Notifications notifications = application.deploymentSpec().notifications(); boolean newCommit = application.change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? 
failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { if (run.status() == outOfCapacity && run.id().type().isProduction()) controller.mailer().send(mails.outOfCapacity(run.id(), recipients)); if (run.status() == deploymentFailed) controller.mailer().send(mails.deploymentFailure(run.id(), recipients)); if (run.status() == installationFailed) controller.mailer().send(mails.installationFailure(run.id(), recipients)); if (run.status() == testFailure) controller.mailer().send(mails.testFailure(run.id(), recipients)); if (run.status() == error) controller.mailer().send(mails.systemError(run.id(), recipients)); } catch (RuntimeException e) { logger.log(INFO, "Exception trying to send mail for " + run.id(), e); } } /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ /** * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout. * * We time out the job before the deployment expires, for zone where deployments are not persistent, * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry, * and the given default installation timeout, minus one minute, is used as a timeout threshold. 
*/ private boolean timedOut(Deployment deployment, Duration defaultTimeout) { Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone()) .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0) .orElse(defaultTimeout); return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1)))); } /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); DeploymentSpec spec = controller.applications().require(id.application()).deploymentSpec(); byte[] servicesXml = servicesXml(controller.system(), testerFlavorFor(id, spec)); byte[] testPackage = controller.applications().applicationStore().get(id.tester(), version); ZoneId zone = id.type().zone(controller.system()); byte[] deploymentXml = deploymentXml(spec.athenzDomain(), spec.athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } } private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) { for (DeploymentSpec.Step step : spec.steps()) if (step.deploysTo(id.type().environment())) return step.zones().get(0).testerFlavor(); throw new IllegalStateException("No step deploys to the zone this run is for!"); } /** Returns a stream containing the zone of the deployment tested in the given run, and all production zones for the application. 
*/ private Set<ZoneId> testedZoneAndProductionZones(RunId id) { return Stream.concat(Stream.of(id.type().zone(controller.system())), application(id.application()).productionDeployments().keySet().stream()) .collect(Collectors.toSet()); } /** Returns all endpoints for all current deployments of the given real application. */ private Map<ZoneId, List<URI>> deploymentEndpoints(ApplicationId id, Iterable<ZoneId> zones) { ImmutableMap.Builder<ZoneId, List<URI>> deployments = ImmutableMap.builder(); for (ZoneId zone : zones) { controller.applications().getDeploymentEndpoints(new DeploymentId(id, zone)) .filter(endpoints -> ! endpoints.isEmpty()) .or(() -> Optional.of(controller.applications().routingPolicies(new DeploymentId(id, zone)).stream() .map(policy -> policy.endpointIn(controller.system()).url()) .collect(Collectors.toUnmodifiableList())) .filter(endpoints -> ! endpoints.isEmpty())) .ifPresent(endpoints -> deployments.put(zone, endpoints)); } return deployments.build(); } /** Returns all content clusters in all current deployments of the given real application. */ private Map<ZoneId, List<String>> listClusters(ApplicationId id, Iterable<ZoneId> zones) { ImmutableMap.Builder<ZoneId, List<String>> clusters = ImmutableMap.builder(); for (ZoneId zone : zones) clusters.put(zone, ImmutableList.copyOf(controller.configServer().getContentClusters(new DeploymentId(id, zone)))); return clusters.build(); } /** Returns the generated services.xml content for the tester application. */ static byte[] servicesXml(SystemName systemName, Optional<String> testerFlavor) { String domain = systemName == SystemName.main ? 
"vespa.vespa" : "vespa.vespa.cd"; String flavor = testerFlavor.orElse("d-1-4-50"); int memoryGb = Integer.parseInt(flavor.split("-")[2]); int jdiscMemoryPercentage = (int) Math.ceil(200.0 / memoryGb); int testMemoryMb = 512 * (memoryGb - 2); String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='tester'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" + " <binding>http: " </handler>\n" + "\n" + " <http>\n" + " <server id='default' port='4080'/>\n" + " <filtering>\n" + " <access-control domain='" + domain + "'>\n" + " <exclude>\n" + " <binding>http: " </exclude>\n" + " </access-control>\n" + " <request-chain id=\"testrunner-api\">\n" + " <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" + " <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" + " <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" + " <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" + " </config>\n" + " <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" + " <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" + " <resourceName>" + domain + ":tester-application</resourceName>\n" + " <action>deploy</action>\n" + " </config>\n" + " </component>\n" + " </filter>\n" + " </request-chain>\n" + " </filtering>\n" + " </http>\n" + "\n" + " <nodes count=\"1\" 
flavor=\"" + flavor + "\" allocated-memory=\"" + jdiscMemoryPercentage + "%\" />\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(StandardCharsets.UTF_8); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + "/>"; return deploymentSpec.getBytes(StandardCharsets.UTF_8); } /** Returns the config for the tests to run for the given job. */ private static byte[] testConfig(ApplicationId id, ZoneId testerZone, SystemName system, Map<ZoneId, List<URI>> deployments, Map<ZoneId, List<String>> clusters) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("application", id.serializedForm()); root.setString("zone", testerZone.value()); root.setString("system", system.value()); Cursor endpointsObject = root.setObject("endpoints"); deployments.forEach((zone, endpoints) -> { Cursor endpointArray = endpointsObject.setArray(zone.value()); for (URI endpoint : endpoints) endpointArray.addString(endpoint.toString()); }); Cursor clustersObject = root.setObject("clusters"); clusters.forEach((zone, clusterList) -> { Cursor clusterArray = clustersObject.setArray(zone.value()); for (String cluster : clusterList) clusterArray.addString(cluster); }); try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. 
*/ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... messages) { log(List.of(messages)); } private void log(List<String> messages) { controller.jobController().log(id, step, DEBUG, messages); } private void log(Level level, String message) { log(level, message, null); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } }
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final Duration endpointTimeout = Duration.ofMinutes(15); static final Duration installationTimeout = Duration.ofMinutes(150); private final Controller controller; private final DeploymentFailureMails mails; public InternalStepRunner(Controller controller) { this.controller = controller; this.mails = new DeploymentFailureMails(controller.zoneRegistry()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case deployTester: return deployTester(id, logger); case installReal: return installReal(id, logger); case installTester: return installTester(id, logger); case startTests: return startTests(id, logger); case endTests: return endTests(id, logger); case copyVespaLogs: return copyVespaLogs(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.log(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e)); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected exception running " + id, e); if (JobProfile.of(id.type()).alwaysRun().contains(step.get())) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + 
versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, versions, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, versions, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, Versions versions, DualLogger logger) { Optional<ApplicationPackage> applicationPackage = id.type().environment().isManuallyDeployed() ? Optional.of(new ApplicationPackage(controller.applications().applicationStore() .getDev(id.application(), id.type().zone(controller.system())))) : Optional.empty(); Optional<Version> vespaVersion = id.type().environment().isManuallyDeployed() ? Optional.of(versions.targetPlatform()) : Optional.empty(); return deploy(id.application(), id.type(), () -> controller.applications().deploy(id.application(), id.type().zone(controller.system()), applicationPackage, new DeployOptions(false, vespaVersion, false, setTheStage)), logger); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = controller.jobController().run(id).get().versions().targetPlatform(); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(id.tester().id(), id.type(), () -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), new DeployOptions(true, Optional.of(platform), false, false)), logger); } private Optional<RunStatus> deploy(ApplicationId id, JobType type, Supplier<ActivateResult> deployment, DualLogger logger) { try { PrepareResponse prepareResponse = deployment.get().prepareResponse(); if ( ! 
prepareResponse.configChangeActions.refeedActions.stream().allMatch(action -> action.allowed)) { List<String> messages = new ArrayList<>(); messages.add("Deploy failed due to non-compatible changes that require re-feed."); messages.add("Your options are:"); messages.add("1. Revert the incompatible changes."); messages.add("2. If you think it is safe in your case, you can override this validation, see"); messages.add(" http: messages.add("3. Deploy as a new application under a different name."); messages.add("Illegal actions:"); prepareResponse.configChangeActions.refeedActions.stream() .filter(action -> ! action.allowed) .flatMap(action -> action.messages.stream()) .forEach(messages::add); messages.add("Details:"); prepareResponse.log.stream() .map(entry -> entry.message) .forEach(messages::add); logger.log(messages); return Optional.of(deploymentFailed); } if (prepareResponse.configChangeActions.restartActions.isEmpty()) logger.log("No services requiring restart."); else prepareResponse.configChangeActions.restartActions.stream() .flatMap(action -> action.services.stream()) .map(service -> service.hostName) .sorted().distinct() .map(Hostname::new) .forEach(hostname -> { controller.applications().restart(new DeploymentId(id, type.zone(controller.system())), Optional.of(hostname)); logger.log("Restarting services on host " + hostname.id() + "."); }); logger.log("Deployment successful."); return Optional.of(running); } catch (ConfigServerException e) { if ( e.getErrorCode() == OUT_OF_CAPACITY && type.isTest() || e.getErrorCode() == ACTIVATION_CONFLICT || e.getErrorCode() == APPLICATION_LOCK_FAILURE || e.getErrorCode() == PARENT_HOST_NOT_READY) { logger.log("Will retry, because of '" + e.getErrorCode() + "' deploying:\n" + e.getMessage()); return Optional.empty(); } if ( e.getErrorCode() == INVALID_APPLICATION_PACKAGE || e.getErrorCode() == BAD_REQUEST) { logger.log("Deployment failed: " + e.getMessage()); return Optional.of(deploymentFailed); } throw e; } } private 
Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! deployment.isPresent()) { logger.log(INFO, "Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); ApplicationVersion application = setTheStage ? versions.sourceApplication().orElse(versions.targetApplication()) : versions.targetApplication(); logger.log("Checking installation of " + platform + " and " + application.id() + " ..."); if ( nodesConverged(id.application(), id.type(), platform, logger) && servicesConverged(id.application(), id.type(), logger)) { if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } else if (timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoints failed to show up within " + endpointTimeout.toMinutes() + " minutes!"); return Optional.of(error); } } if (timedOut(deployment.get(), installationTimeout)) { logger.log(INFO, "Installation failed to complete within " + installationTimeout.toMinutes() + " minutes!"); return Optional.of(installationFailed); } logger.log("Installation not yet complete."); return Optional.empty(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! 
deployment.isPresent()) { logger.log(WARNING, "Deployment expired before installation of tester was successful."); return Optional.of(error); } Version platform = controller.jobController().run(id).get().versions().targetPlatform(); logger.log("Checking installation of tester container ..."); if ( nodesConverged(id.tester().id(), id.type(), platform, logger) && servicesConverged(id.tester().id(), id.type(), logger)) { if (endpointsAvailable(id.tester().id(), id.type().zone(controller.system()), logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } else if (timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Tester failed to show up within " + endpointTimeout.toMinutes() + " minutes!"); return Optional.of(error); } } if (timedOut(deployment.get(), installationTimeout)) { logger.log(WARNING, "Installation of tester failed to complete within " + installationTimeout.toMinutes() + " minutes of real deployment!"); return Optional.of(error); } logger.log("Installation of tester not yet complete."); return Optional.empty(); } private boolean endpointsAvailable(ApplicationId id, ZoneId zoneId, DualLogger logger) { logger.log("Attempting to find deployment endpoints ..."); Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id, Set.of(zoneId)); if ( ! 
endpoints.containsKey(zoneId)) { logger.log("Endpoints not yet ready."); return false; } List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); endpoints.forEach((zone, uris) -> { messages.add("- " + zone); uris.forEach(uri -> messages.add(" |-- " + uri)); }); logger.log(messages); return true; } private boolean nodesConverged(ApplicationId id, JobType type, Version target, DualLogger logger) { List<Node> nodes = controller.configServer().nodeRepository().list(type.zone(controller.system()), id, ImmutableSet.of(active, reserved)); List<String> statuses = nodes.stream() .map(node -> String.format("%70s: %-16s%-25s%-32s%s", node.hostname(), node.serviceState(), node.wantedVersion() + (node.currentVersion().equals(node.wantedVersion()) ? "" : " <-- " + node.currentVersion()), node.restartGeneration() >= node.wantedRestartGeneration() ? "" : "restart pending (" + node.wantedRestartGeneration() + " <-- " + node.restartGeneration() + ")", node.rebootGeneration() >= node.wantedRebootGeneration() ? "" : "reboot pending (" + node.wantedRebootGeneration() + " <-- " + node.rebootGeneration() + ")")) .collect(Collectors.toList()); logger.log(statuses); return nodes.stream().allMatch(node -> node.currentVersion().equals(target) && node.restartGeneration() >= node.wantedRestartGeneration() && node.rebootGeneration() >= node.wantedRebootGeneration()); } private boolean servicesConverged(ApplicationId id, JobType type, DualLogger logger) { Optional<ServiceConvergence> convergence = controller.configServer().serviceConvergence(new DeploymentId(id, type.zone(controller.system()))); if ( ! 
convergence.isPresent()) { logger.log("Config status not currently available -- will retry."); return false; } logger.log("Wanted config generation is " + convergence.get().wantedGeneration()); List<String> statuses = convergence.get().services().stream() .filter(serviceStatus -> serviceStatus.currentGeneration() != convergence.get().wantedGeneration()) .map(serviceStatus -> String.format("%70s: %11s on port %4d has %s", serviceStatus.host().value(), serviceStatus.type(), serviceStatus.port(), serviceStatus.currentGeneration() == -1 ? "not started!" : Long.toString(serviceStatus.currentGeneration()))) .collect(Collectors.toList()); logger.log(statuses); if (statuses.isEmpty()) logger.log("All services on wanted config generation."); return convergence.get().converged(); } private Optional<RunStatus> startTests(RunId id, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if ( ! deployment.isPresent()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(aborted); } Set<ZoneId> zones = testedZoneAndProductionZones(id); logger.log("Attempting to find endpoints ..."); Map<ZoneId, List<URI>> endpoints = deploymentEndpoints(id.application(), zones); if ( ! 
endpoints.containsKey(id.type().zone(controller.system())) && timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!"); return Optional.of(error); } List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); endpoints.forEach((zone, uris) -> { messages.add("- " + zone); uris.forEach(uri -> messages.add(" |-- " + uri)); }); logger.log(messages); Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id); if (testerEndpoint.isEmpty() && timedOut(deployment.get(), endpointTimeout)) { logger.log(WARNING, "Endpoints for the tester container vanished again, while it was still active!"); return Optional.of(error); } if (controller.jobController().cloud().ready(testerEndpoint.get())) { logger.log("Starting tests ..."); controller.jobController().cloud().startTests(testerEndpoint.get(), TesterCloud.Suite.of(id.type()), testConfig(id.application(), id.type().zone(controller.system()), controller.system(), endpoints, listClusters(id.application(), zones))); return Optional.of(running); } logger.log("Tester container not yet ready."); return Optional.empty(); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if ( ! deployment(id.application(), id.type()).isPresent()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<URI> testerEndpoint = controller.jobController().testerEndpoint(id); if ( ! 
testerEndpoint.isPresent()) { logger.log("Endpoints for tester not found -- trying again later."); return Optional.empty(); } controller.jobController().updateTestLog(id); TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(testerEndpoint.get()); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); return Optional.of(testFailure); case ERROR: logger.log(INFO, "Tester failed running its tests!"); return Optional.of(error); case SUCCESS: logger.log("Tests completed successfully."); return Optional.of(running); default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } } private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) { ZoneId zone = id.type().zone(controller.system()); if (deployment(id.application(), id.type()).isPresent()) try { logger.log("Copying Vespa log from nodes of " + id.application() + " in " + zone + " ..."); List<LogEntry> entries = new ArrayList<>(); String logs = IOUtils.readAll(controller.configServer().getLogs(new DeploymentId(id.application(), zone), Collections.emptyMap()), StandardCharsets.UTF_8); for (String line : logs.split("\n")) { String[] parts = line.split("\t"); if (parts.length != 7) continue; entries.add(new LogEntry(0, (long) (Double.parseDouble(parts[0]) * 1000), LogEntry.typeOf(LogLevel.parse(parts[5])), parts[1] + '\t' + parts[3] + '\t' + parts[4] + '\n' + parts[6].replaceAll("\\\\n", "\n") .replaceAll("\\\\t", "\t"))); } controller.jobController().log(id, Step.copyVespaLogs, entries); } catch (Exception e) { logger.log(INFO, "Failure getting vespa logs for " + id, e); return Optional.of(error); } return Optional.of(running); } private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { try { logger.log("Deactivating deployment of " + id.application() + " in " + 
id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting application " + id.application(), e); return Optional.of(error); } } private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { try { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting tester of " + id.application(), e); return Optional.of(error); } } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { JobReport report = JobReport.ofJob(run.id().application(), run.id().type(), run.id().number(), run.hasFailed() ? Optional.of(DeploymentJobs.JobError.unknown) : Optional.empty()); controller.applications().deploymentTrigger().notifyOfCompletion(report); if (run.hasFailed()) sendNotification(run, logger); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "'no longer supposed to run?:", e); return Optional.of(error); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ private void sendNotification(Run run, DualLogger logger) { Application application = controller.applications().require(run.id().application()); Notifications notifications = application.deploymentSpec().notifications(); boolean newCommit = application.change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? 
failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { if (run.status() == outOfCapacity && run.id().type().isProduction()) controller.mailer().send(mails.outOfCapacity(run.id(), recipients)); if (run.status() == deploymentFailed) controller.mailer().send(mails.deploymentFailure(run.id(), recipients)); if (run.status() == installationFailed) controller.mailer().send(mails.installationFailure(run.id(), recipients)); if (run.status() == testFailure) controller.mailer().send(mails.testFailure(run.id(), recipients)); if (run.status() == error) controller.mailer().send(mails.systemError(run.id(), recipients)); } catch (RuntimeException e) { logger.log(INFO, "Exception trying to send mail for " + run.id(), e); } } /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ /** * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout. * * We time out the job before the deployment expires, for zone where deployments are not persistent, * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry, * and the given default installation timeout, minus one minute, is used as a timeout threshold. 
*/ private boolean timedOut(Deployment deployment, Duration defaultTimeout) { Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone()) .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0) .orElse(defaultTimeout); return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1)))); } /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); DeploymentSpec spec = controller.applications().require(id.application()).deploymentSpec(); byte[] servicesXml = servicesXml(controller.system(), testerFlavorFor(id, spec)); byte[] testPackage = controller.applications().applicationStore().get(id.tester(), version); ZoneId zone = id.type().zone(controller.system()); byte[] deploymentXml = deploymentXml(spec.athenzDomain(), spec.athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } } private static Optional<String> testerFlavorFor(RunId id, DeploymentSpec spec) { for (DeploymentSpec.Step step : spec.steps()) if (step.deploysTo(id.type().environment())) return step.zones().get(0).testerFlavor(); throw new IllegalStateException("No step deploys to the zone this run is for!"); } /** Returns a stream containing the zone of the deployment tested in the given run, and all production zones for the application. 
*/ private Set<ZoneId> testedZoneAndProductionZones(RunId id) { return Stream.concat(Stream.of(id.type().zone(controller.system())), application(id.application()).productionDeployments().keySet().stream()) .collect(Collectors.toSet()); } /** Returns all endpoints for all current deployments of the given real application. */ private Map<ZoneId, List<URI>> deploymentEndpoints(ApplicationId id, Iterable<ZoneId> zones) { ImmutableMap.Builder<ZoneId, List<URI>> deployments = ImmutableMap.builder(); for (ZoneId zone : zones) { controller.applications().getDeploymentEndpoints(new DeploymentId(id, zone)) .filter(endpoints -> ! endpoints.isEmpty()) .or(() -> Optional.of(controller.applications().routingPolicies(new DeploymentId(id, zone)).stream() .map(policy -> policy.endpointIn(controller.system()).url()) .collect(Collectors.toUnmodifiableList())) .filter(endpoints -> ! endpoints.isEmpty())) .ifPresent(endpoints -> deployments.put(zone, endpoints)); } return deployments.build(); } /** Returns all content clusters in all current deployments of the given real application. */ private Map<ZoneId, List<String>> listClusters(ApplicationId id, Iterable<ZoneId> zones) { ImmutableMap.Builder<ZoneId, List<String>> clusters = ImmutableMap.builder(); for (ZoneId zone : zones) clusters.put(zone, ImmutableList.copyOf(controller.configServer().getContentClusters(new DeploymentId(id, zone)))); return clusters.build(); } /** Returns the generated services.xml content for the tester application. */ static byte[] servicesXml(SystemName systemName, Optional<String> testerFlavor) { String domain = systemName == SystemName.main ? 
"vespa.vespa" : "vespa.vespa.cd"; String flavor = testerFlavor.orElse("d-1-4-50"); int memoryGb = Integer.parseInt(flavor.split("-")[2]); int jdiscMemoryPercentage = (int) Math.ceil(200.0 / memoryGb); int testMemoryMb = 512 * (memoryGb - 2); String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='tester'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.hosted.testrunner.TestRunnerHandler\" bundle=\"vespa-testrunner-components\">\n" + " <binding>http: " </handler>\n" + "\n" + " <http>\n" + " <server id='default' port='4080'/>\n" + " <filtering>\n" + " <access-control domain='" + domain + "'>\n" + " <exclude>\n" + " <binding>http: " </exclude>\n" + " </access-control>\n" + " <request-chain id=\"testrunner-api\">\n" + " <filter id='authz-filter' class='com.yahoo.jdisc.http.filter.security.athenz.AthenzAuthorizationFilter' bundle=\"jdisc-security-filters\">\n" + " <config name=\"jdisc.http.filter.security.athenz.athenz-authorization-filter\">\n" + " <credentialsToVerify>TOKEN_ONLY</credentialsToVerify>\n" + " <roleTokenHeaderName>Yahoo-Role-Auth</roleTokenHeaderName>\n" + " </config>\n" + " <component id=\"com.yahoo.jdisc.http.filter.security.athenz.StaticRequestResourceMapper\" bundle=\"jdisc-security-filters\">\n" + " <config name=\"jdisc.http.filter.security.athenz.static-request-resource-mapper\">\n" + " <resourceName>" + domain + ":tester-application</resourceName>\n" + " <action>deploy</action>\n" + " </config>\n" + " </component>\n" + " </filter>\n" + " </request-chain>\n" + " </filtering>\n" + " </http>\n" + "\n" + " <nodes count=\"1\" 
flavor=\"" + flavor + "\" allocated-memory=\"" + jdiscMemoryPercentage + "%\" />\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(StandardCharsets.UTF_8); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + "/>"; return deploymentSpec.getBytes(StandardCharsets.UTF_8); } /** Returns the config for the tests to run for the given job. */ private static byte[] testConfig(ApplicationId id, ZoneId testerZone, SystemName system, Map<ZoneId, List<URI>> deployments, Map<ZoneId, List<String>> clusters) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString("application", id.serializedForm()); root.setString("zone", testerZone.value()); root.setString("system", system.value()); Cursor endpointsObject = root.setObject("endpoints"); deployments.forEach((zone, endpoints) -> { Cursor endpointArray = endpointsObject.setArray(zone.value()); for (URI endpoint : endpoints) endpointArray.addString(endpoint.toString()); }); Cursor clustersObject = root.setObject("clusters"); clusters.forEach((zone, clusterList) -> { Cursor clusterArray = clustersObject.setArray(zone.value()); for (String cluster : clusterList) clusterArray.addString(cluster); }); try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { throw new UncheckedIOException(e); } } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. 
*/ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... messages) { log(List.of(messages)); } private void log(List<String> messages) { controller.jobController().log(id, step, DEBUG, messages); } private void log(Level level, String message) { log(level, message, null); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } }
I think you need to do subItemCopy.setParent(copy) then.
public CompositeItem clone() { CompositeItem copy = (CompositeItem) super.clone(); copy.subitems = new java.util.ArrayList<>(); for (Item subItem : subitems) { Item subItemCopy = subItem.clone(); copy.subitems.add(subItemCopy); } fixConnexity(copy); return copy; }
copy.subitems.add(subItemCopy);
public CompositeItem clone() { CompositeItem copy = (CompositeItem) super.clone(); copy.subitems = new java.util.ArrayList<>(); for (Item subItem : subitems) { Item subItemCopy = subItem.clone(); subItemCopy.setParent(copy); copy.subitems.add(subItemCopy); } fixConnexity(copy); return copy; }
class CompositeItem extends Item { private List<Item> subitems = new java.util.ArrayList<>(4); /** Sets the index name of all subitems of this */ public void setIndexName(String index) { for (Iterator<Item> i = getItemIterator(); i.hasNext();) { Item item = i.next(); item.setIndexName(index); } } @Override public void disclose(Discloser discloser) { super.disclose(discloser); for (Item item : subitems) discloser.addChild(item); } public void ensureNotInSubtree(CompositeItem item) { for (Iterator<Item> i = item.getItemIterator(); i.hasNext();) { Item possibleCycle = i.next(); if (this == possibleCycle) { throw new QueryException("Cannot add " + item + " to " + this + " as it would create a cycle"); } else if (possibleCycle instanceof CompositeItem) { ensureNotInSubtree((CompositeItem) possibleCycle); } } } public void addItem(Item item) { adding(item); subitems.add(item); } protected void adding(Item item) { Validator.ensureNotNull("A composite item child", item); Validator.ensure("Attempted to add a composite to itself", item != this); if (item instanceof CompositeItem) { ensureNotInSubtree((CompositeItem) item); } item.setParent(this); } /** * Inserts the item at a position and increases the index of existing items * starting on this position by one * * @throws IndexOutOfBoundsException if the index is out of range */ public void addItem(int index, Item item) { if (index > subitems.size() || index < 0) { throw new IndexOutOfBoundsException("Could not add a subitem at position " + index + " to " + this); } adding(item); subitems.add(index, item); } /** For NOT items, which may wish to insert nulls */ void insertNullFirstItem() { subitems.add(0, null); } /** * Returns a subitem * * @param index the (0-base) index of the item to return * @throws IndexOutOfBoundsException if there is no subitem at index */ public Item getItem(int index) { return subitems.get(index); } /** * Replaces the item at the given index * * @param index the (0-base) index of the item to replace 
* @param item the new item * @return the old item at this position. The parent of the old item is <i>not</i> cleared * @throws IndexOutOfBoundsException if there is no item at this index */ public Item setItem(int index, Item item) { if (index >= subitems.size() || index < 0) throw new IndexOutOfBoundsException("Could not add a subitem at position " + index + " to " + this); adding(item); Item old = subitems.set(index, item); if (old!=item) removing(old); return old; } /** * Returns the index of a subitem * * @param item The child item to find the index of * @return the 0-base index of the child or -1 if there is no such child */ public int getItemIndex(Item item) { return subitems.indexOf(item); } /** * Removes the item at the given index * * @param index the index of the item to remove * @return the removed item * @throws IndexOutOfBoundsException if there is no item at the given index */ public Item removeItem(int index) { Item item = subitems.remove(index); removing(item); return item; } /** Always call on every remove */ private void removing(Item item) { if (item == null) { return; } if (item.getParent() == this) { item.setParent(null); } } /** * Removes the given item. Does nothing if the item is not present. * * @param item the item to remove * @return whether the item was removed */ public boolean removeItem(Item item) { boolean removed = subitems.remove(item); if (removed) { removing(item); } return removed; } /** Returns the number of direct ancestors of this item */ public int getItemCount() { return subitems.size(); } /** Returns a modifiable list iterator */ public ListIterator<Item> getItemIterator() { return new ListIteratorWrapper(this); } public int encode(ByteBuffer buffer) { encodeThis(buffer); int itemCount = 1; for (Iterator<Item> i = getItemIterator(); i.hasNext();) { Item subitem = i.next(); itemCount += subitem.encode(buffer); } return itemCount; } /** * Encodes just this item, not it's usual subitems, to the given buffer. 
*/ protected void encodeThis(ByteBuffer buffer) { super.encodeThis(buffer); IntegerCompressor.putCompressedPositiveNumber(encodingArity(), buffer); } protected int encodingArity() { return subitems.size(); } protected void appendBodyString(StringBuilder buffer) { for (Iterator<Item> i = getItemIterator(); i.hasNext();) { Item item = i.next(); buffer.append(item.toString()); if (i.hasNext()) { buffer.append(" "); } } } /** Composite items should be parenthized when not on the top level */ protected boolean shouldParenthize() { return getParent()!= null && ! (getParent() instanceof QueryTree); } /** Returns a deep copy of this item */ private void fixConnexity(CompositeItem copy) { List<Item> flatland = new ArrayList<>(); List<Item> flatCopy = new ArrayList<>(); taggingFlatten(this, flatland); taggingFlatten(copy, flatCopy); int barrier = flatland.size(); for (int i = 0; i < barrier; ++i) { Item orig = flatland.get(i); int connectedTo = find(orig.connectedItem, flatland); if (connectedTo >= 0) { TaggableItem tagged = (TaggableItem) flatCopy.get(i); tagged.setConnectivity(flatCopy.get(connectedTo), orig.connectivity); } } } private void taggingFlatten(Item tree, List<Item> container) { if (tree.hasUniqueID()) { container.add(tree); } else if (tree instanceof CompositeItem) { CompositeItem asComposite = (CompositeItem) tree; for (Iterator<Item> i = asComposite.getItemIterator(); i.hasNext();) { taggingFlatten(i.next(), container); } } } private int find(Item needle, List<Item> haystack) { if (needle == null) { return -1; } int barrier = haystack.size(); for (int i = 0; i < barrier; ++i) { if (haystack.get(i) == needle) { return i; } } return -1; } public int hashCode() { int code = getName().hashCode() + subitems.size() * 17; for (int i = 0; i < subitems.size() && i <= 5; i++) { code += subitems.get(i).hashCode(); } return code; } /** * Returns whether this item is of the same class and * contains the same state as the given item */ public boolean equals(Object object) 
{ if (!super.equals(object)) { return false; } CompositeItem other = (CompositeItem) object; if (!this.subitems.equals(other.subitems)) { return false; } return true; } /** Make composite immutable if this is supported. */ public void lock() {} /** Whether this composite is in a mutable state. */ public boolean isLocked() { return false; } /** Handles mutator calls correctly */ private static class ListIteratorWrapper implements ListIterator<Item> { private CompositeItem owner; private ListIterator<Item> wrapped; private Item current = null; public ListIteratorWrapper(CompositeItem owner) { this.owner = owner; wrapped = owner.subitems.listIterator(); } public boolean hasNext() { return wrapped.hasNext(); } public Item next() { current = wrapped.next(); return current; } public boolean hasPrevious() { return wrapped.hasPrevious(); } public Item previous() { Item current = wrapped.previous(); return current; } public int nextIndex() { return wrapped.nextIndex(); } public int previousIndex() { return wrapped.previousIndex(); } public void remove() { owner.removing(current); wrapped.remove(); } public void set(Item o) { Item newItem = o; owner.removing(current); owner.adding(newItem); current = newItem; wrapped.set(newItem); } public void add(Item o) { Item newItem = o; owner.adding(newItem); wrapped.add(o); } } @Override public int getTermCount() { int terms = 0; for (Item item : subitems) { terms += item.getTermCount(); } return terms; } }
class CompositeItem extends Item { private List<Item> subitems = new java.util.ArrayList<>(4); /** Sets the index name of all subitems of this */ public void setIndexName(String index) { for (Iterator<Item> i = getItemIterator(); i.hasNext();) { Item item = i.next(); item.setIndexName(index); } } @Override public void disclose(Discloser discloser) { super.disclose(discloser); for (Item item : subitems) discloser.addChild(item); } public void ensureNotInSubtree(CompositeItem item) { for (Iterator<Item> i = item.getItemIterator(); i.hasNext();) { Item possibleCycle = i.next(); if (this == possibleCycle) { throw new QueryException("Cannot add " + item + " to " + this + " as it would create a cycle"); } else if (possibleCycle instanceof CompositeItem) { ensureNotInSubtree((CompositeItem) possibleCycle); } } } public void addItem(Item item) { adding(item); subitems.add(item); } protected void adding(Item item) { Validator.ensureNotNull("A composite item child", item); Validator.ensure("Attempted to add a composite to itself", item != this); if (item instanceof CompositeItem) { ensureNotInSubtree((CompositeItem) item); } item.setParent(this); } /** * Inserts the item at a position and increases the index of existing items * starting on this position by one * * @throws IndexOutOfBoundsException if the index is out of range */ public void addItem(int index, Item item) { if (index > subitems.size() || index < 0) { throw new IndexOutOfBoundsException("Could not add a subitem at position " + index + " to " + this); } adding(item); subitems.add(index, item); } /** For NOT items, which may wish to insert nulls */ void insertNullFirstItem() { subitems.add(0, null); } /** * Returns a subitem * * @param index the (0-base) index of the item to return * @throws IndexOutOfBoundsException if there is no subitem at index */ public Item getItem(int index) { return subitems.get(index); } /** * Replaces the item at the given index * * @param index the (0-base) index of the item to replace 
* @param item the new item * @return the old item at this position. The parent of the old item is <i>not</i> cleared * @throws IndexOutOfBoundsException if there is no item at this index */ public Item setItem(int index, Item item) { if (index >= subitems.size() || index < 0) throw new IndexOutOfBoundsException("Could not add a subitem at position " + index + " to " + this); adding(item); Item old = subitems.set(index, item); if (old!=item) removing(old); return old; } /** * Returns the index of a subitem * * @param item The child item to find the index of * @return the 0-base index of the child or -1 if there is no such child */ public int getItemIndex(Item item) { return subitems.indexOf(item); } /** * Removes the item at the given index * * @param index the index of the item to remove * @return the removed item * @throws IndexOutOfBoundsException if there is no item at the given index */ public Item removeItem(int index) { Item item = subitems.remove(index); removing(item); return item; } /** Always call on every remove */ private void removing(Item item) { if (item == null) { return; } if (item.getParent() == this) { item.setParent(null); } } /** * Removes the given item. Does nothing if the item is not present. * * @param item the item to remove * @return whether the item was removed */ public boolean removeItem(Item item) { boolean removed = subitems.remove(item); if (removed) { removing(item); } return removed; } /** Returns the number of direct ancestors of this item */ public int getItemCount() { return subitems.size(); } /** Returns a modifiable list iterator */ public ListIterator<Item> getItemIterator() { return new ListIteratorWrapper(this); } public int encode(ByteBuffer buffer) { encodeThis(buffer); int itemCount = 1; for (Iterator<Item> i = getItemIterator(); i.hasNext();) { Item subitem = i.next(); itemCount += subitem.encode(buffer); } return itemCount; } /** * Encodes just this item, not it's usual subitems, to the given buffer. 
*/ protected void encodeThis(ByteBuffer buffer) { super.encodeThis(buffer); IntegerCompressor.putCompressedPositiveNumber(encodingArity(), buffer); } protected int encodingArity() { return subitems.size(); } protected void appendBodyString(StringBuilder buffer) { for (Iterator<Item> i = getItemIterator(); i.hasNext();) { Item item = i.next(); buffer.append(item.toString()); if (i.hasNext()) { buffer.append(" "); } } } /** Composite items should be parenthized when not on the top level */ protected boolean shouldParenthize() { return getParent()!= null && ! (getParent() instanceof QueryTree); } /** Returns a deep copy of this item */ private void fixConnexity(CompositeItem copy) { List<Item> flatland = new ArrayList<>(); List<Item> flatCopy = new ArrayList<>(); taggingFlatten(this, flatland); taggingFlatten(copy, flatCopy); int barrier = flatland.size(); for (int i = 0; i < barrier; ++i) { Item orig = flatland.get(i); int connectedTo = find(orig.connectedItem, flatland); if (connectedTo >= 0) { TaggableItem tagged = (TaggableItem) flatCopy.get(i); tagged.setConnectivity(flatCopy.get(connectedTo), orig.connectivity); } } } private void taggingFlatten(Item tree, List<Item> container) { if (tree.hasUniqueID()) { container.add(tree); } else if (tree instanceof CompositeItem) { CompositeItem asComposite = (CompositeItem) tree; for (Iterator<Item> i = asComposite.getItemIterator(); i.hasNext();) { taggingFlatten(i.next(), container); } } } private int find(Item needle, List<Item> haystack) { if (needle == null) { return -1; } int barrier = haystack.size(); for (int i = 0; i < barrier; ++i) { if (haystack.get(i) == needle) { return i; } } return -1; } public int hashCode() { int code = getName().hashCode() + subitems.size() * 17; for (int i = 0; i < subitems.size() && i <= 5; i++) { code += subitems.get(i).hashCode(); } return code; } /** * Returns whether this item is of the same class and * contains the same state as the given item */ public boolean equals(Object object) 
{ if (!super.equals(object)) { return false; } CompositeItem other = (CompositeItem) object; if (!this.subitems.equals(other.subitems)) { return false; } return true; } /** Make composite immutable if this is supported. */ public void lock() {} /** Whether this composite is in a mutable state. */ public boolean isLocked() { return false; } /** Handles mutator calls correctly */ private static class ListIteratorWrapper implements ListIterator<Item> { private CompositeItem owner; private ListIterator<Item> wrapped; private Item current = null; public ListIteratorWrapper(CompositeItem owner) { this.owner = owner; wrapped = owner.subitems.listIterator(); } public boolean hasNext() { return wrapped.hasNext(); } public Item next() { current = wrapped.next(); return current; } public boolean hasPrevious() { return wrapped.hasPrevious(); } public Item previous() { Item current = wrapped.previous(); return current; } public int nextIndex() { return wrapped.nextIndex(); } public int previousIndex() { return wrapped.previousIndex(); } public void remove() { owner.removing(current); wrapped.remove(); } public void set(Item o) { Item newItem = o; owner.removing(current); owner.adding(newItem); current = newItem; wrapped.set(newItem); } public void add(Item o) { Item newItem = o; owner.adding(newItem); wrapped.add(o); } } @Override public int getTermCount() { int terms = 0; for (Item item : subitems) { terms += item.getTermCount(); } return terms; } }
Consider `IllegalStateException`. This can only happen if an application is deployed *before* any proxy nodes are provisioned, i.e. an illegal state. 🙂
public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals) { final var proxyNodes = nodeRepository.getNodes(NodeType.proxy); if (proxyNodes.size() == 0) { throw new RuntimeException("Missing proxy nodes in nodeRepository"); } final var firstProxyNode = proxyNodes.get(0); final var networkNames = proxyNodes.stream() .flatMap(node -> node.ipAddresses().stream()) .collect(Collectors.toSet()); return new LoadBalancerInstance( HostName.from(firstProxyNode.hostname()), Optional.empty(), Set.of(4080, 4443), networkNames, reals ); }
throw new RuntimeException("Missing proxy nodes in nodeRepository");
public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals) { final var proxyNodes = nodeRepository.getNodes(NodeType.proxy); proxyNodes.sort(hostnameComparator); if (proxyNodes.size() == 0) { throw new IllegalStateException("Missing proxy nodes in nodeRepository"); } final var firstProxyNode = proxyNodes.get(0); final var networkNames = proxyNodes.stream() .flatMap(node -> node.ipAddresses().stream()) .map(SharedLoadBalancerService::addNetworKPrefixLength) .collect(Collectors.toSet()); return new LoadBalancerInstance( HostName.from(firstProxyNode.hostname()), Optional.empty(), Set.of(4080, 4443), networkNames, reals ); }
class SharedLoadBalancerService implements LoadBalancerService { private final NodeRepository nodeRepository; public SharedLoadBalancerService(NodeRepository nodeRepository) { this.nodeRepository = Objects.requireNonNull(nodeRepository, "Missing nodeRepository value"); } @Override @Override public void remove(ApplicationId application, ClusterSpec.Id cluster) { } @Override public Protocol protocol() { return Protocol.dualstack; } }
class SharedLoadBalancerService implements LoadBalancerService { private static final Comparator<Node> hostnameComparator = Comparator.comparing(Node::hostname); private final NodeRepository nodeRepository; @Inject public SharedLoadBalancerService(NodeRepository nodeRepository) { this.nodeRepository = Objects.requireNonNull(nodeRepository, "Missing nodeRepository value"); } @Override @Override public void remove(ApplicationId application, ClusterSpec.Id cluster) { } @Override public Protocol protocol() { return Protocol.dualstack; } private static String addNetworKPrefixLength(String address) { if (IP.isV6(address)) { return address + "/128"; } else { return address + "/32"; } } }
Nit: While we still don't have an official code style, the convention is not finalize local vars by default.
public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals) { final var proxyNodes = nodeRepository.getNodes(NodeType.proxy); if (proxyNodes.size() == 0) { throw new RuntimeException("Missing proxy nodes in nodeRepository"); } final var firstProxyNode = proxyNodes.get(0); final var networkNames = proxyNodes.stream() .flatMap(node -> node.ipAddresses().stream()) .collect(Collectors.toSet()); return new LoadBalancerInstance( HostName.from(firstProxyNode.hostname()), Optional.empty(), Set.of(4080, 4443), networkNames, reals ); }
final var proxyNodes = nodeRepository.getNodes(NodeType.proxy);
public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals) { final var proxyNodes = nodeRepository.getNodes(NodeType.proxy); proxyNodes.sort(hostnameComparator); if (proxyNodes.size() == 0) { throw new IllegalStateException("Missing proxy nodes in nodeRepository"); } final var firstProxyNode = proxyNodes.get(0); final var networkNames = proxyNodes.stream() .flatMap(node -> node.ipAddresses().stream()) .map(SharedLoadBalancerService::addNetworKPrefixLength) .collect(Collectors.toSet()); return new LoadBalancerInstance( HostName.from(firstProxyNode.hostname()), Optional.empty(), Set.of(4080, 4443), networkNames, reals ); }
class SharedLoadBalancerService implements LoadBalancerService { private final NodeRepository nodeRepository; public SharedLoadBalancerService(NodeRepository nodeRepository) { this.nodeRepository = Objects.requireNonNull(nodeRepository, "Missing nodeRepository value"); } @Override @Override public void remove(ApplicationId application, ClusterSpec.Id cluster) { } @Override public Protocol protocol() { return Protocol.dualstack; } }
class SharedLoadBalancerService implements LoadBalancerService { private static final Comparator<Node> hostnameComparator = Comparator.comparing(Node::hostname); private final NodeRepository nodeRepository; @Inject public SharedLoadBalancerService(NodeRepository nodeRepository) { this.nodeRepository = Objects.requireNonNull(nodeRepository, "Missing nodeRepository value"); } @Override @Override public void remove(ApplicationId application, ClusterSpec.Id cluster) { } @Override public Protocol protocol() { return Protocol.dualstack; } private static String addNetworKPrefixLength(String address) { if (IP.isV6(address)) { return address + "/128"; } else { return address + "/32"; } } }
Consider sorting list by hostname, so that "first" is deterministic.
public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals) { final var proxyNodes = nodeRepository.getNodes(NodeType.proxy); if (proxyNodes.size() == 0) { throw new RuntimeException("Missing proxy nodes in nodeRepository"); } final var firstProxyNode = proxyNodes.get(0); final var networkNames = proxyNodes.stream() .flatMap(node -> node.ipAddresses().stream()) .collect(Collectors.toSet()); return new LoadBalancerInstance( HostName.from(firstProxyNode.hostname()), Optional.empty(), Set.of(4080, 4443), networkNames, reals ); }
final var firstProxyNode = proxyNodes.get(0);
public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals) { final var proxyNodes = nodeRepository.getNodes(NodeType.proxy); proxyNodes.sort(hostnameComparator); if (proxyNodes.size() == 0) { throw new IllegalStateException("Missing proxy nodes in nodeRepository"); } final var firstProxyNode = proxyNodes.get(0); final var networkNames = proxyNodes.stream() .flatMap(node -> node.ipAddresses().stream()) .map(SharedLoadBalancerService::addNetworKPrefixLength) .collect(Collectors.toSet()); return new LoadBalancerInstance( HostName.from(firstProxyNode.hostname()), Optional.empty(), Set.of(4080, 4443), networkNames, reals ); }
class SharedLoadBalancerService implements LoadBalancerService { private final NodeRepository nodeRepository; public SharedLoadBalancerService(NodeRepository nodeRepository) { this.nodeRepository = Objects.requireNonNull(nodeRepository, "Missing nodeRepository value"); } @Override @Override public void remove(ApplicationId application, ClusterSpec.Id cluster) { } @Override public Protocol protocol() { return Protocol.dualstack; } }
class SharedLoadBalancerService implements LoadBalancerService { private static final Comparator<Node> hostnameComparator = Comparator.comparing(Node::hostname); private final NodeRepository nodeRepository; @Inject public SharedLoadBalancerService(NodeRepository nodeRepository) { this.nodeRepository = Objects.requireNonNull(nodeRepository, "Missing nodeRepository value"); } @Override @Override public void remove(ApplicationId application, ClusterSpec.Id cluster) { } @Override public Protocol protocol() { return Protocol.dualstack; } private static String addNetworKPrefixLength(String address) { if (IP.isV6(address)) { return address + "/128"; } else { return address + "/32"; } } }
Networks are expected to be in CIDR notation, so append `/32` or `/128` depending on if it's IPv4 or IPv6.
public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals) { final var proxyNodes = nodeRepository.getNodes(NodeType.proxy); if (proxyNodes.size() == 0) { throw new RuntimeException("Missing proxy nodes in nodeRepository"); } final var firstProxyNode = proxyNodes.get(0); final var networkNames = proxyNodes.stream() .flatMap(node -> node.ipAddresses().stream()) .collect(Collectors.toSet()); return new LoadBalancerInstance( HostName.from(firstProxyNode.hostname()), Optional.empty(), Set.of(4080, 4443), networkNames, reals ); }
networkNames,
public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals) { final var proxyNodes = nodeRepository.getNodes(NodeType.proxy); proxyNodes.sort(hostnameComparator); if (proxyNodes.size() == 0) { throw new IllegalStateException("Missing proxy nodes in nodeRepository"); } final var firstProxyNode = proxyNodes.get(0); final var networkNames = proxyNodes.stream() .flatMap(node -> node.ipAddresses().stream()) .map(SharedLoadBalancerService::addNetworKPrefixLength) .collect(Collectors.toSet()); return new LoadBalancerInstance( HostName.from(firstProxyNode.hostname()), Optional.empty(), Set.of(4080, 4443), networkNames, reals ); }
class SharedLoadBalancerService implements LoadBalancerService { private final NodeRepository nodeRepository; public SharedLoadBalancerService(NodeRepository nodeRepository) { this.nodeRepository = Objects.requireNonNull(nodeRepository, "Missing nodeRepository value"); } @Override @Override public void remove(ApplicationId application, ClusterSpec.Id cluster) { } @Override public Protocol protocol() { return Protocol.dualstack; } }
class SharedLoadBalancerService implements LoadBalancerService { private static final Comparator<Node> hostnameComparator = Comparator.comparing(Node::hostname); private final NodeRepository nodeRepository; @Inject public SharedLoadBalancerService(NodeRepository nodeRepository) { this.nodeRepository = Objects.requireNonNull(nodeRepository, "Missing nodeRepository value"); } @Override @Override public void remove(ApplicationId application, ClusterSpec.Id cluster) { } @Override public Protocol protocol() { return Protocol.dualstack; } private static String addNetworKPrefixLength(String address) { if (IP.isV6(address)) { return address + "/128"; } else { return address + "/32"; } } }
Done
public CompositeItem clone() { CompositeItem copy = (CompositeItem) super.clone(); copy.subitems = new java.util.ArrayList<>(); for (Item subItem : subitems) { Item subItemCopy = subItem.clone(); copy.subitems.add(subItemCopy); } fixConnexity(copy); return copy; }
copy.subitems.add(subItemCopy);
public CompositeItem clone() { CompositeItem copy = (CompositeItem) super.clone(); copy.subitems = new java.util.ArrayList<>(); for (Item subItem : subitems) { Item subItemCopy = subItem.clone(); subItemCopy.setParent(copy); copy.subitems.add(subItemCopy); } fixConnexity(copy); return copy; }
class CompositeItem extends Item { private List<Item> subitems = new java.util.ArrayList<>(4); /** Sets the index name of all subitems of this */ public void setIndexName(String index) { for (Iterator<Item> i = getItemIterator(); i.hasNext();) { Item item = i.next(); item.setIndexName(index); } } @Override public void disclose(Discloser discloser) { super.disclose(discloser); for (Item item : subitems) discloser.addChild(item); } public void ensureNotInSubtree(CompositeItem item) { for (Iterator<Item> i = item.getItemIterator(); i.hasNext();) { Item possibleCycle = i.next(); if (this == possibleCycle) { throw new QueryException("Cannot add " + item + " to " + this + " as it would create a cycle"); } else if (possibleCycle instanceof CompositeItem) { ensureNotInSubtree((CompositeItem) possibleCycle); } } } public void addItem(Item item) { adding(item); subitems.add(item); } protected void adding(Item item) { Validator.ensureNotNull("A composite item child", item); Validator.ensure("Attempted to add a composite to itself", item != this); if (item instanceof CompositeItem) { ensureNotInSubtree((CompositeItem) item); } item.setParent(this); } /** * Inserts the item at a position and increases the index of existing items * starting on this position by one * * @throws IndexOutOfBoundsException if the index is out of range */ public void addItem(int index, Item item) { if (index > subitems.size() || index < 0) { throw new IndexOutOfBoundsException("Could not add a subitem at position " + index + " to " + this); } adding(item); subitems.add(index, item); } /** For NOT items, which may wish to insert nulls */ void insertNullFirstItem() { subitems.add(0, null); } /** * Returns a subitem * * @param index the (0-base) index of the item to return * @throws IndexOutOfBoundsException if there is no subitem at index */ public Item getItem(int index) { return subitems.get(index); } /** * Replaces the item at the given index * * @param index the (0-base) index of the item to replace 
* @param item the new item * @return the old item at this position. The parent of the old item is <i>not</i> cleared * @throws IndexOutOfBoundsException if there is no item at this index */ public Item setItem(int index, Item item) { if (index >= subitems.size() || index < 0) throw new IndexOutOfBoundsException("Could not add a subitem at position " + index + " to " + this); adding(item); Item old = subitems.set(index, item); if (old!=item) removing(old); return old; } /** * Returns the index of a subitem * * @param item The child item to find the index of * @return the 0-base index of the child or -1 if there is no such child */ public int getItemIndex(Item item) { return subitems.indexOf(item); } /** * Removes the item at the given index * * @param index the index of the item to remove * @return the removed item * @throws IndexOutOfBoundsException if there is no item at the given index */ public Item removeItem(int index) { Item item = subitems.remove(index); removing(item); return item; } /** Always call on every remove */ private void removing(Item item) { if (item == null) { return; } if (item.getParent() == this) { item.setParent(null); } } /** * Removes the given item. Does nothing if the item is not present. * * @param item the item to remove * @return whether the item was removed */ public boolean removeItem(Item item) { boolean removed = subitems.remove(item); if (removed) { removing(item); } return removed; } /** Returns the number of direct ancestors of this item */ public int getItemCount() { return subitems.size(); } /** Returns a modifiable list iterator */ public ListIterator<Item> getItemIterator() { return new ListIteratorWrapper(this); } public int encode(ByteBuffer buffer) { encodeThis(buffer); int itemCount = 1; for (Iterator<Item> i = getItemIterator(); i.hasNext();) { Item subitem = i.next(); itemCount += subitem.encode(buffer); } return itemCount; } /** * Encodes just this item, not it's usual subitems, to the given buffer. 
*/ protected void encodeThis(ByteBuffer buffer) { super.encodeThis(buffer); IntegerCompressor.putCompressedPositiveNumber(encodingArity(), buffer); } protected int encodingArity() { return subitems.size(); } protected void appendBodyString(StringBuilder buffer) { for (Iterator<Item> i = getItemIterator(); i.hasNext();) { Item item = i.next(); buffer.append(item.toString()); if (i.hasNext()) { buffer.append(" "); } } } /** Composite items should be parenthized when not on the top level */ protected boolean shouldParenthize() { return getParent()!= null && ! (getParent() instanceof QueryTree); } /** Returns a deep copy of this item */ private void fixConnexity(CompositeItem copy) { List<Item> flatland = new ArrayList<>(); List<Item> flatCopy = new ArrayList<>(); taggingFlatten(this, flatland); taggingFlatten(copy, flatCopy); int barrier = flatland.size(); for (int i = 0; i < barrier; ++i) { Item orig = flatland.get(i); int connectedTo = find(orig.connectedItem, flatland); if (connectedTo >= 0) { TaggableItem tagged = (TaggableItem) flatCopy.get(i); tagged.setConnectivity(flatCopy.get(connectedTo), orig.connectivity); } } } private void taggingFlatten(Item tree, List<Item> container) { if (tree.hasUniqueID()) { container.add(tree); } else if (tree instanceof CompositeItem) { CompositeItem asComposite = (CompositeItem) tree; for (Iterator<Item> i = asComposite.getItemIterator(); i.hasNext();) { taggingFlatten(i.next(), container); } } } private int find(Item needle, List<Item> haystack) { if (needle == null) { return -1; } int barrier = haystack.size(); for (int i = 0; i < barrier; ++i) { if (haystack.get(i) == needle) { return i; } } return -1; } public int hashCode() { int code = getName().hashCode() + subitems.size() * 17; for (int i = 0; i < subitems.size() && i <= 5; i++) { code += subitems.get(i).hashCode(); } return code; } /** * Returns whether this item is of the same class and * contains the same state as the given item */ public boolean equals(Object object) 
{ if (!super.equals(object)) { return false; } CompositeItem other = (CompositeItem) object; if (!this.subitems.equals(other.subitems)) { return false; } return true; } /** Make composite immutable if this is supported. */ public void lock() {} /** Whether this composite is in a mutable state. */ public boolean isLocked() { return false; } /** Handles mutator calls correctly */ private static class ListIteratorWrapper implements ListIterator<Item> { private CompositeItem owner; private ListIterator<Item> wrapped; private Item current = null; public ListIteratorWrapper(CompositeItem owner) { this.owner = owner; wrapped = owner.subitems.listIterator(); } public boolean hasNext() { return wrapped.hasNext(); } public Item next() { current = wrapped.next(); return current; } public boolean hasPrevious() { return wrapped.hasPrevious(); } public Item previous() { Item current = wrapped.previous(); return current; } public int nextIndex() { return wrapped.nextIndex(); } public int previousIndex() { return wrapped.previousIndex(); } public void remove() { owner.removing(current); wrapped.remove(); } public void set(Item o) { Item newItem = o; owner.removing(current); owner.adding(newItem); current = newItem; wrapped.set(newItem); } public void add(Item o) { Item newItem = o; owner.adding(newItem); wrapped.add(o); } } @Override public int getTermCount() { int terms = 0; for (Item item : subitems) { terms += item.getTermCount(); } return terms; } }
class CompositeItem extends Item { private List<Item> subitems = new java.util.ArrayList<>(4); /** Sets the index name of all subitems of this */ public void setIndexName(String index) { for (Iterator<Item> i = getItemIterator(); i.hasNext();) { Item item = i.next(); item.setIndexName(index); } } @Override public void disclose(Discloser discloser) { super.disclose(discloser); for (Item item : subitems) discloser.addChild(item); } public void ensureNotInSubtree(CompositeItem item) { for (Iterator<Item> i = item.getItemIterator(); i.hasNext();) { Item possibleCycle = i.next(); if (this == possibleCycle) { throw new QueryException("Cannot add " + item + " to " + this + " as it would create a cycle"); } else if (possibleCycle instanceof CompositeItem) { ensureNotInSubtree((CompositeItem) possibleCycle); } } } public void addItem(Item item) { adding(item); subitems.add(item); } protected void adding(Item item) { Validator.ensureNotNull("A composite item child", item); Validator.ensure("Attempted to add a composite to itself", item != this); if (item instanceof CompositeItem) { ensureNotInSubtree((CompositeItem) item); } item.setParent(this); } /** * Inserts the item at a position and increases the index of existing items * starting on this position by one * * @throws IndexOutOfBoundsException if the index is out of range */ public void addItem(int index, Item item) { if (index > subitems.size() || index < 0) { throw new IndexOutOfBoundsException("Could not add a subitem at position " + index + " to " + this); } adding(item); subitems.add(index, item); } /** For NOT items, which may wish to insert nulls */ void insertNullFirstItem() { subitems.add(0, null); } /** * Returns a subitem * * @param index the (0-base) index of the item to return * @throws IndexOutOfBoundsException if there is no subitem at index */ public Item getItem(int index) { return subitems.get(index); } /** * Replaces the item at the given index * * @param index the (0-base) index of the item to replace 
* @param item the new item * @return the old item at this position. The parent of the old item is <i>not</i> cleared * @throws IndexOutOfBoundsException if there is no item at this index */ public Item setItem(int index, Item item) { if (index >= subitems.size() || index < 0) throw new IndexOutOfBoundsException("Could not add a subitem at position " + index + " to " + this); adding(item); Item old = subitems.set(index, item); if (old!=item) removing(old); return old; } /** * Returns the index of a subitem * * @param item The child item to find the index of * @return the 0-base index of the child or -1 if there is no such child */ public int getItemIndex(Item item) { return subitems.indexOf(item); } /** * Removes the item at the given index * * @param index the index of the item to remove * @return the removed item * @throws IndexOutOfBoundsException if there is no item at the given index */ public Item removeItem(int index) { Item item = subitems.remove(index); removing(item); return item; } /** Always call on every remove */ private void removing(Item item) { if (item == null) { return; } if (item.getParent() == this) { item.setParent(null); } } /** * Removes the given item. Does nothing if the item is not present. * * @param item the item to remove * @return whether the item was removed */ public boolean removeItem(Item item) { boolean removed = subitems.remove(item); if (removed) { removing(item); } return removed; } /** Returns the number of direct ancestors of this item */ public int getItemCount() { return subitems.size(); } /** Returns a modifiable list iterator */ public ListIterator<Item> getItemIterator() { return new ListIteratorWrapper(this); } public int encode(ByteBuffer buffer) { encodeThis(buffer); int itemCount = 1; for (Iterator<Item> i = getItemIterator(); i.hasNext();) { Item subitem = i.next(); itemCount += subitem.encode(buffer); } return itemCount; } /** * Encodes just this item, not it's usual subitems, to the given buffer. 
*/ protected void encodeThis(ByteBuffer buffer) { super.encodeThis(buffer); IntegerCompressor.putCompressedPositiveNumber(encodingArity(), buffer); } protected int encodingArity() { return subitems.size(); } protected void appendBodyString(StringBuilder buffer) { for (Iterator<Item> i = getItemIterator(); i.hasNext();) { Item item = i.next(); buffer.append(item.toString()); if (i.hasNext()) { buffer.append(" "); } } } /** Composite items should be parenthized when not on the top level */ protected boolean shouldParenthize() { return getParent()!= null && ! (getParent() instanceof QueryTree); } /** Returns a deep copy of this item */ private void fixConnexity(CompositeItem copy) { List<Item> flatland = new ArrayList<>(); List<Item> flatCopy = new ArrayList<>(); taggingFlatten(this, flatland); taggingFlatten(copy, flatCopy); int barrier = flatland.size(); for (int i = 0; i < barrier; ++i) { Item orig = flatland.get(i); int connectedTo = find(orig.connectedItem, flatland); if (connectedTo >= 0) { TaggableItem tagged = (TaggableItem) flatCopy.get(i); tagged.setConnectivity(flatCopy.get(connectedTo), orig.connectivity); } } } private void taggingFlatten(Item tree, List<Item> container) { if (tree.hasUniqueID()) { container.add(tree); } else if (tree instanceof CompositeItem) { CompositeItem asComposite = (CompositeItem) tree; for (Iterator<Item> i = asComposite.getItemIterator(); i.hasNext();) { taggingFlatten(i.next(), container); } } } private int find(Item needle, List<Item> haystack) { if (needle == null) { return -1; } int barrier = haystack.size(); for (int i = 0; i < barrier; ++i) { if (haystack.get(i) == needle) { return i; } } return -1; } public int hashCode() { int code = getName().hashCode() + subitems.size() * 17; for (int i = 0; i < subitems.size() && i <= 5; i++) { code += subitems.get(i).hashCode(); } return code; } /** * Returns whether this item is of the same class and * contains the same state as the given item */ public boolean equals(Object object) 
{ if (!super.equals(object)) { return false; } CompositeItem other = (CompositeItem) object; if (!this.subitems.equals(other.subitems)) { return false; } return true; } /** Make composite immutable if this is supported. */ public void lock() {} /** Whether this composite is in a mutable state. */ public boolean isLocked() { return false; } /** Handles mutator calls correctly */ private static class ListIteratorWrapper implements ListIterator<Item> { private CompositeItem owner; private ListIterator<Item> wrapped; private Item current = null; public ListIteratorWrapper(CompositeItem owner) { this.owner = owner; wrapped = owner.subitems.listIterator(); } public boolean hasNext() { return wrapped.hasNext(); } public Item next() { current = wrapped.next(); return current; } public boolean hasPrevious() { return wrapped.hasPrevious(); } public Item previous() { Item current = wrapped.previous(); return current; } public int nextIndex() { return wrapped.nextIndex(); } public int previousIndex() { return wrapped.previousIndex(); } public void remove() { owner.removing(current); wrapped.remove(); } public void set(Item o) { Item newItem = o; owner.removing(current); owner.adding(newItem); current = newItem; wrapped.set(newItem); } public void add(Item o) { Item newItem = o; owner.adding(newItem); wrapped.add(o); } } @Override public int getTermCount() { int terms = 0; for (Item item : subitems) { terms += item.getTermCount(); } return terms; } }
Fixed
public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals) { final var proxyNodes = nodeRepository.getNodes(NodeType.proxy); if (proxyNodes.size() == 0) { throw new RuntimeException("Missing proxy nodes in nodeRepository"); } final var firstProxyNode = proxyNodes.get(0); final var networkNames = proxyNodes.stream() .flatMap(node -> node.ipAddresses().stream()) .collect(Collectors.toSet()); return new LoadBalancerInstance( HostName.from(firstProxyNode.hostname()), Optional.empty(), Set.of(4080, 4443), networkNames, reals ); }
throw new RuntimeException("Missing proxy nodes in nodeRepository");
public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals) { final var proxyNodes = nodeRepository.getNodes(NodeType.proxy); proxyNodes.sort(hostnameComparator); if (proxyNodes.size() == 0) { throw new IllegalStateException("Missing proxy nodes in nodeRepository"); } final var firstProxyNode = proxyNodes.get(0); final var networkNames = proxyNodes.stream() .flatMap(node -> node.ipAddresses().stream()) .map(SharedLoadBalancerService::addNetworKPrefixLength) .collect(Collectors.toSet()); return new LoadBalancerInstance( HostName.from(firstProxyNode.hostname()), Optional.empty(), Set.of(4080, 4443), networkNames, reals ); }
class SharedLoadBalancerService implements LoadBalancerService { private final NodeRepository nodeRepository; public SharedLoadBalancerService(NodeRepository nodeRepository) { this.nodeRepository = Objects.requireNonNull(nodeRepository, "Missing nodeRepository value"); } @Override @Override public void remove(ApplicationId application, ClusterSpec.Id cluster) { } @Override public Protocol protocol() { return Protocol.dualstack; } }
class SharedLoadBalancerService implements LoadBalancerService { private static final Comparator<Node> hostnameComparator = Comparator.comparing(Node::hostname); private final NodeRepository nodeRepository; @Inject public SharedLoadBalancerService(NodeRepository nodeRepository) { this.nodeRepository = Objects.requireNonNull(nodeRepository, "Missing nodeRepository value"); } @Override @Override public void remove(ApplicationId application, ClusterSpec.Id cluster) { } @Override public Protocol protocol() { return Protocol.dualstack; } private static String addNetworKPrefixLength(String address) { if (IP.isV6(address)) { return address + "/128"; } else { return address + "/32"; } } }
Fixed
public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals) { final var proxyNodes = nodeRepository.getNodes(NodeType.proxy); if (proxyNodes.size() == 0) { throw new RuntimeException("Missing proxy nodes in nodeRepository"); } final var firstProxyNode = proxyNodes.get(0); final var networkNames = proxyNodes.stream() .flatMap(node -> node.ipAddresses().stream()) .collect(Collectors.toSet()); return new LoadBalancerInstance( HostName.from(firstProxyNode.hostname()), Optional.empty(), Set.of(4080, 4443), networkNames, reals ); }
final var firstProxyNode = proxyNodes.get(0);
public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals) { final var proxyNodes = nodeRepository.getNodes(NodeType.proxy); proxyNodes.sort(hostnameComparator); if (proxyNodes.size() == 0) { throw new IllegalStateException("Missing proxy nodes in nodeRepository"); } final var firstProxyNode = proxyNodes.get(0); final var networkNames = proxyNodes.stream() .flatMap(node -> node.ipAddresses().stream()) .map(SharedLoadBalancerService::addNetworKPrefixLength) .collect(Collectors.toSet()); return new LoadBalancerInstance( HostName.from(firstProxyNode.hostname()), Optional.empty(), Set.of(4080, 4443), networkNames, reals ); }
class SharedLoadBalancerService implements LoadBalancerService { private final NodeRepository nodeRepository; public SharedLoadBalancerService(NodeRepository nodeRepository) { this.nodeRepository = Objects.requireNonNull(nodeRepository, "Missing nodeRepository value"); } @Override @Override public void remove(ApplicationId application, ClusterSpec.Id cluster) { } @Override public Protocol protocol() { return Protocol.dualstack; } }
class SharedLoadBalancerService implements LoadBalancerService { private static final Comparator<Node> hostnameComparator = Comparator.comparing(Node::hostname); private final NodeRepository nodeRepository; @Inject public SharedLoadBalancerService(NodeRepository nodeRepository) { this.nodeRepository = Objects.requireNonNull(nodeRepository, "Missing nodeRepository value"); } @Override @Override public void remove(ApplicationId application, ClusterSpec.Id cluster) { } @Override public Protocol protocol() { return Protocol.dualstack; } private static String addNetworKPrefixLength(String address) { if (IP.isV6(address)) { return address + "/128"; } else { return address + "/32"; } } }
Fixed
public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals) { final var proxyNodes = nodeRepository.getNodes(NodeType.proxy); if (proxyNodes.size() == 0) { throw new RuntimeException("Missing proxy nodes in nodeRepository"); } final var firstProxyNode = proxyNodes.get(0); final var networkNames = proxyNodes.stream() .flatMap(node -> node.ipAddresses().stream()) .collect(Collectors.toSet()); return new LoadBalancerInstance( HostName.from(firstProxyNode.hostname()), Optional.empty(), Set.of(4080, 4443), networkNames, reals ); }
networkNames,
public LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, Set<Real> reals) { final var proxyNodes = nodeRepository.getNodes(NodeType.proxy); proxyNodes.sort(hostnameComparator); if (proxyNodes.size() == 0) { throw new IllegalStateException("Missing proxy nodes in nodeRepository"); } final var firstProxyNode = proxyNodes.get(0); final var networkNames = proxyNodes.stream() .flatMap(node -> node.ipAddresses().stream()) .map(SharedLoadBalancerService::addNetworKPrefixLength) .collect(Collectors.toSet()); return new LoadBalancerInstance( HostName.from(firstProxyNode.hostname()), Optional.empty(), Set.of(4080, 4443), networkNames, reals ); }
class SharedLoadBalancerService implements LoadBalancerService { private final NodeRepository nodeRepository; public SharedLoadBalancerService(NodeRepository nodeRepository) { this.nodeRepository = Objects.requireNonNull(nodeRepository, "Missing nodeRepository value"); } @Override @Override public void remove(ApplicationId application, ClusterSpec.Id cluster) { } @Override public Protocol protocol() { return Protocol.dualstack; } }
class SharedLoadBalancerService implements LoadBalancerService { private static final Comparator<Node> hostnameComparator = Comparator.comparing(Node::hostname); private final NodeRepository nodeRepository; @Inject public SharedLoadBalancerService(NodeRepository nodeRepository) { this.nodeRepository = Objects.requireNonNull(nodeRepository, "Missing nodeRepository value"); } @Override @Override public void remove(ApplicationId application, ClusterSpec.Id cluster) { } @Override public Protocol protocol() { return Protocol.dualstack; } private static String addNetworKPrefixLength(String address) { if (IP.isV6(address)) { return address + "/128"; } else { return address + "/32"; } } }
Consider adding a TODO here so that we remember to remove this after the next version has been released.
private List<RotationId> rotationsFromSlime(Inspector root) { final var rotations = rotationListFromSlime(root.field(rotationsField)); final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField)); legacyRotation.ifPresent(rotations::add); return rotations; }
final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField));
private List<RotationId> rotationsFromSlime(Inspector root) { final var rotations = rotationListFromSlime(root.field(rotationsField)); final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField)); legacyRotation.ifPresent(rotations::add); return rotations; }
class ApplicationSerializer { private final String idField = "id"; private final String createdAtField = "createdAt"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String pinnedField = "pinned"; private final String outstandingChangeField = "outstandingChangeField"; private final String ownershipIssueIdField = "ownershipIssueId"; private final String ownerField = "confirmedOwner"; private final String majorVersionField = "majorVersion"; private final String writeQualityField = "writeQuality"; private final String queryQualityField = "queryQuality"; private final String pemDeployKeyField = "pemDeployKey"; private final String rotationsField = "rotations"; private final String deprecatedRotationField = "rotation"; private final String rotationStatusField = "rotationStatus"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationBuildNumberField = "applicationBuildNumber"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String authorEmailField = "authorEmailField"; private final String compileVersionField = "compileVersion"; private final String buildTimeField = "buildTime"; private final String lastQueriedField = "lastQueried"; private final String lastWrittenField = "lastWritten"; private final String lastQueriesPerSecondField = "lastQueriesPerSecond"; private 
final String lastWritesPerSecondField = "lastWritesPerSecond"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String issueIdField = "jiraIssueId"; private final String builtInternallyField = "builtInternally"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String pausedUntilField = "pausedUntil"; private final String jobRunIdField = "id"; private final String versionField = "version"; private final String revisionField = "revision"; private final String sourceVersionField = "sourceVersion"; private final String sourceApplicationField = "sourceRevision"; private final String reasonField = "reason"; private final String atField = "at"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField = "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = "diskbusy"; private final String deploymentMetricsField = "metrics"; private final String deploymentMetricsQPSField = "queriesPerSecond"; private final String deploymentMetricsWPSField = "writesPerSecond"; private final String deploymentMetricsDocsField = "documentCount"; private 
final String deploymentMetricsQueryLatencyField = "queryLatencyMillis"; private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis"; private final String deploymentMetricsUpdateTime = "lastUpdated"; private final String deploymentMetricsWarningsField = "warnings"; public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setLong(createdAtField, application.createdAt().toEpochMilli()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.change(), root, deployingField); toSlime(application.outstandingChange(), root, outstandingChangeField); application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value())); application.owner().ifPresent(owner -> root.setString(ownerField, owner.username())); application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion)); root.setDouble(queryQualityField, application.metrics().queryServiceQuality()); root.setDouble(writeQualityField, application.metrics().writeServiceQuality()); application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey)); Cursor rotations = root.setArray(rotationsField); application.rotations().forEach(rotation -> rotations.addString(rotation.asString())); toSlime(application.rotationStatus(), root.setArray(rotationStatusField)); return slime; } private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, 
Cursor object) { zoneIdToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); deploymentMetricsToSlime(deployment.metrics(), object); deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value)); } private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) { Cursor root = object.setObject(deploymentMetricsField); root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond()); root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond()); root.setDouble(deploymentMetricsDocsField, metrics.documentCount()); root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis()); root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli())); if (!metrics.warnings().isEmpty()) { Cursor warningsObject = root.setObject(deploymentMetricsWarningsField); metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count)); } } private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { 
toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneIdToSlime(ZoneId zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if (applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent()) { object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong()); toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField)); applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString())); 
applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli())); } } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.issueId().ifPresent(jiraIssueId -> cursor.setString(issueIdField, jiraIssueId.value())); cursor.setBool(builtInternallyField, deploymentJobs.deployedInternally()); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().jobName()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField)); jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField)); jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField)); jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, object, firstFailingField)); jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until)); } private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) { Cursor object = parent.setObject(jobRunObjectName); object.setLong(jobRunIdField, jobRun.id()); object.setString(versionField, jobRun.platform().toString()); toSlime(jobRun.application(), object.setObject(revisionField)); jobRun.sourcePlatform().ifPresent(version -> 
object.setString(sourceVersionField, version.toString())); jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField))); object.setString(reasonField, jobRun.reason()); object.setLong(atField, jobRun.at().toEpochMilli()); } private void toSlime(Change deploying, Cursor parentObject, String fieldName) { if (deploying.isEmpty()) return; Cursor object = parentObject.setObject(fieldName); if (deploying.platform().isPresent()) object.setString(versionField, deploying.platform().get().toString()); if (deploying.application().isPresent()) toSlime(deploying.application().get(), object); if (deploying.isPinned()) object.setBool(pinnedField, true); } private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) { rotationStatus.forEach((hostname, status) -> { Cursor object = array.addObject(); object.setString("hostname", hostname.value()); object.setString("status", status.name()); }); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false); ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField)); Change deploying = changeFromSlime(root.field(deployingField)); Change outstandingChange = changeFromSlime(root.field(outstandingChangeField)); Optional<IssueId> ownershipIssueId = optionalString(root.field(ownershipIssueIdField)).map(IssueId::from); Optional<User> owner = optionalString(root.field(ownerField)).map(User::from); OptionalInt majorVersion = 
optionalInteger(root.field(majorVersionField)); ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(), root.field(writeQualityField).asDouble()); Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField)); List<RotationId> rotations = rotationsFromSlime(root); Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField)); return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)), applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)), deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)), DeploymentActivity.create(optionalInstant(deploymentObject.field(lastQueriedField)), optionalInstant(deploymentObject.field(lastWrittenField)), optionalDouble(deploymentObject.field(lastQueriesPerSecondField)), optionalDouble(deploymentObject.field(lastWritesPerSecondField)))); } private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) { Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ? 
Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) : Optional.empty(); return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(), object.field(deploymentMetricsWPSField).asDouble(), object.field(deploymentMetricsDocsField).asDouble(), object.field(deploymentMetricsQueryLatencyField).asDouble(), object.field(deploymentMetricsWriteLatencyField).asDouble(), instant, deploymentWarningsFrom(object.field(deploymentMetricsWarningsField))); } private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) { Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name), (int) value.asLong())); return Collections.unmodifiableMap(warnings); } private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) { if (!object.valid()) { return Collections.emptyMap(); } Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); object.traverse((ArrayTraverser) (idx, inspect) -> { HostName hostname = HostName.from(inspect.field("hostname").asString()); RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString()); rotationStatus.put(hostname, status); }); return Collections.unmodifiableMap(rotationStatus); } private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = 
object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private ClusterInfo clusterInfoFromSlime(Inspector inspector) { String flavor = inspector.field(clusterInfoFlavorField).asString(); int cost = (int)inspector.field(clusterInfoCostField).asLong(); String type = inspector.field(clusterInfoTypeField).asString(); double flavorCpu = inspector.field(clusterInfoCpuField).asDouble(); double flavorMem = inspector.field(clusterInfoMemField).asDouble(); double flavorDisk = inspector.field(clusterInfoDiskField).asDouble(); List<String> hostnames = new ArrayList<>(); inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString())); return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames); } private ZoneId zoneIdFromSlime(Inspector object) { return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString()); } private ApplicationVersion applicationVersionFromSlime(Inspector object) { if ( ! object.valid()) return ApplicationVersion.unknown; OptionalLong applicationBuildNumber = optionalLong(object.field(applicationBuildNumberField)); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); if ( ! sourceRevision.isPresent() || ! applicationBuildNumber.isPresent()) { return ApplicationVersion.unknown; } Optional<String> authorEmail = optionalString(object.field(authorEmailField)); Optional<Version> compileVersion = optionalString(object.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = optionalInstant(object.field(buildTimeField)); if ( ! 
authorEmail.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong()); if ( ! compileVersion.isPresent() || ! buildTime.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get()); return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(), compileVersion.get(), buildTime.get()); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { OptionalLong projectId = optionalLong(object.field(projectIdField)); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<IssueId> issueId = optionalString(object.field(issueIdField)).map(IssueId::from); boolean builtInternally = object.field(builtInternallyField).asBool(); return new DeploymentJobs(projectId, jobStatusList, issueId, builtInternally); } private Change changeFromSlime(Inspector object) { if ( ! 
object.valid()) return Change.empty(); Inspector versionFieldValue = object.field(versionField); Change change = Change.empty(); if (versionFieldValue.valid()) change = Change.of(Version.fromString(versionFieldValue.asString())); if (object.field(applicationBuildNumberField).valid()) change = change.with(applicationVersionFromSlime(object)); if (object.field(pinnedField).asBool()) change = change.withPin(); return change; } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add)); return jobStatusList; } private Optional<JobStatus> jobStatusFromSlime(Inspector object) { Optional<JobType> jobType = JobType.fromOptionalJobName(object.field(jobTypeField).asString()); if (! jobType.isPresent()) return Optional.empty(); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return Optional.of(new JobStatus(jobType.get(), jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField)), optionalLong(object.field(pausedUntilField)))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(), new Version(object.field(versionField).asString()), applicationVersionFromSlime(object.field(revisionField)), optionalString(object.field(sourceVersionField)).map(Version::fromString), Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime), object.field(reasonField).asString(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private List<RotationId> rotationListFromSlime(Inspector field) { final var rotations = new ArrayList<RotationId>(); for (int i = 0; i < field.entries(); ++i) { var entry = field.entry(i); rotations.add(new RotationId(entry.asString())); } return rotations; } private Optional<RotationId> legacyRotationFromSlime(Inspector field) { return field.valid() ? optionalString(field).map(RotationId::new) : Optional.empty(); } private OptionalLong optionalLong(Inspector field) { return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty(); } private OptionalInt optionalInteger(Inspector field) { return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty(); } private OptionalDouble optionalDouble(Inspector field) { return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } private Optional<Instant> optionalInstant(Inspector field) { OptionalLong value = optionalLong(field); return value.isPresent() ? Optional.of(Instant.ofEpochMilli(value.getAsLong())) : Optional.empty(); } }
class ApplicationSerializer { private final String idField = "id"; private final String createdAtField = "createdAt"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String pinnedField = "pinned"; private final String outstandingChangeField = "outstandingChangeField"; private final String ownershipIssueIdField = "ownershipIssueId"; private final String ownerField = "confirmedOwner"; private final String majorVersionField = "majorVersion"; private final String writeQualityField = "writeQuality"; private final String queryQualityField = "queryQuality"; private final String pemDeployKeyField = "pemDeployKey"; private final String rotationsField = "endpoints"; private final String deprecatedRotationField = "rotation"; private final String rotationStatusField = "rotationStatus"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationBuildNumberField = "applicationBuildNumber"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String authorEmailField = "authorEmailField"; private final String compileVersionField = "compileVersion"; private final String buildTimeField = "buildTime"; private final String lastQueriedField = "lastQueried"; private final String lastWrittenField = "lastWritten"; private final String lastQueriesPerSecondField = "lastQueriesPerSecond"; private 
final String lastWritesPerSecondField = "lastWritesPerSecond"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String issueIdField = "jiraIssueId"; private final String builtInternallyField = "builtInternally"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String pausedUntilField = "pausedUntil"; private final String jobRunIdField = "id"; private final String versionField = "version"; private final String revisionField = "revision"; private final String sourceVersionField = "sourceVersion"; private final String sourceApplicationField = "sourceRevision"; private final String reasonField = "reason"; private final String atField = "at"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField = "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = "diskbusy"; private final String deploymentMetricsField = "metrics"; private final String deploymentMetricsQPSField = "queriesPerSecond"; private final String deploymentMetricsWPSField = "writesPerSecond"; private final String deploymentMetricsDocsField = "documentCount"; private 
final String deploymentMetricsQueryLatencyField = "queryLatencyMillis"; private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis"; private final String deploymentMetricsUpdateTime = "lastUpdated"; private final String deploymentMetricsWarningsField = "warnings"; public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setLong(createdAtField, application.createdAt().toEpochMilli()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.change(), root, deployingField); toSlime(application.outstandingChange(), root, outstandingChangeField); application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value())); application.owner().ifPresent(owner -> root.setString(ownerField, owner.username())); application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion)); root.setDouble(queryQualityField, application.metrics().queryServiceQuality()); root.setDouble(writeQualityField, application.metrics().writeServiceQuality()); application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey)); Cursor rotations = root.setArray(rotationsField); application.rotations().forEach(rotation -> rotations.addString(rotation.asString())); toSlime(application.rotationStatus(), root.setArray(rotationStatusField)); return slime; } private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, 
Cursor object) { zoneIdToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); deploymentMetricsToSlime(deployment.metrics(), object); deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value)); } private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) { Cursor root = object.setObject(deploymentMetricsField); root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond()); root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond()); root.setDouble(deploymentMetricsDocsField, metrics.documentCount()); root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis()); root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli())); if (!metrics.warnings().isEmpty()) { Cursor warningsObject = root.setObject(deploymentMetricsWarningsField); metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count)); } } private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { 
toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneIdToSlime(ZoneId zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if (applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent()) { object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong()); toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField)); applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString())); 
applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli())); } } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.issueId().ifPresent(jiraIssueId -> cursor.setString(issueIdField, jiraIssueId.value())); cursor.setBool(builtInternallyField, deploymentJobs.deployedInternally()); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().jobName()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField)); jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField)); jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField)); jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, object, firstFailingField)); jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until)); } private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) { Cursor object = parent.setObject(jobRunObjectName); object.setLong(jobRunIdField, jobRun.id()); object.setString(versionField, jobRun.platform().toString()); toSlime(jobRun.application(), object.setObject(revisionField)); jobRun.sourcePlatform().ifPresent(version -> 
object.setString(sourceVersionField, version.toString())); jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField))); object.setString(reasonField, jobRun.reason()); object.setLong(atField, jobRun.at().toEpochMilli()); } private void toSlime(Change deploying, Cursor parentObject, String fieldName) { if (deploying.isEmpty()) return; Cursor object = parentObject.setObject(fieldName); if (deploying.platform().isPresent()) object.setString(versionField, deploying.platform().get().toString()); if (deploying.application().isPresent()) toSlime(deploying.application().get(), object); if (deploying.isPinned()) object.setBool(pinnedField, true); } private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) { rotationStatus.forEach((hostname, status) -> { Cursor object = array.addObject(); object.setString("hostname", hostname.value()); object.setString("status", status.name()); }); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false); ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField)); Change deploying = changeFromSlime(root.field(deployingField)); Change outstandingChange = changeFromSlime(root.field(outstandingChangeField)); Optional<IssueId> ownershipIssueId = optionalString(root.field(ownershipIssueIdField)).map(IssueId::from); Optional<User> owner = optionalString(root.field(ownerField)).map(User::from); OptionalInt majorVersion = 
optionalInteger(root.field(majorVersionField)); ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(), root.field(writeQualityField).asDouble()); Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField)); List<RotationId> rotations = rotationsFromSlime(root); Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField)); return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)), applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)), deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)), DeploymentActivity.create(optionalInstant(deploymentObject.field(lastQueriedField)), optionalInstant(deploymentObject.field(lastWrittenField)), optionalDouble(deploymentObject.field(lastQueriesPerSecondField)), optionalDouble(deploymentObject.field(lastWritesPerSecondField)))); } private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) { Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ? 
Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) : Optional.empty(); return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(), object.field(deploymentMetricsWPSField).asDouble(), object.field(deploymentMetricsDocsField).asDouble(), object.field(deploymentMetricsQueryLatencyField).asDouble(), object.field(deploymentMetricsWriteLatencyField).asDouble(), instant, deploymentWarningsFrom(object.field(deploymentMetricsWarningsField))); } private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) { Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name), (int) value.asLong())); return Collections.unmodifiableMap(warnings); } private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) { if (!object.valid()) { return Collections.emptyMap(); } Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); object.traverse((ArrayTraverser) (idx, inspect) -> { HostName hostname = HostName.from(inspect.field("hostname").asString()); RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString()); rotationStatus.put(hostname, status); }); return Collections.unmodifiableMap(rotationStatus); } private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = 
object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private ClusterInfo clusterInfoFromSlime(Inspector inspector) { String flavor = inspector.field(clusterInfoFlavorField).asString(); int cost = (int)inspector.field(clusterInfoCostField).asLong(); String type = inspector.field(clusterInfoTypeField).asString(); double flavorCpu = inspector.field(clusterInfoCpuField).asDouble(); double flavorMem = inspector.field(clusterInfoMemField).asDouble(); double flavorDisk = inspector.field(clusterInfoDiskField).asDouble(); List<String> hostnames = new ArrayList<>(); inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString())); return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames); } private ZoneId zoneIdFromSlime(Inspector object) { return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString()); } private ApplicationVersion applicationVersionFromSlime(Inspector object) { if ( ! object.valid()) return ApplicationVersion.unknown; OptionalLong applicationBuildNumber = optionalLong(object.field(applicationBuildNumberField)); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); if ( ! sourceRevision.isPresent() || ! applicationBuildNumber.isPresent()) { return ApplicationVersion.unknown; } Optional<String> authorEmail = optionalString(object.field(authorEmailField)); Optional<Version> compileVersion = optionalString(object.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = optionalInstant(object.field(buildTimeField)); if ( ! 
authorEmail.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong()); if ( ! compileVersion.isPresent() || ! buildTime.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get()); return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(), compileVersion.get(), buildTime.get()); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { OptionalLong projectId = optionalLong(object.field(projectIdField)); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<IssueId> issueId = optionalString(object.field(issueIdField)).map(IssueId::from); boolean builtInternally = object.field(builtInternallyField).asBool(); return new DeploymentJobs(projectId, jobStatusList, issueId, builtInternally); } private Change changeFromSlime(Inspector object) { if ( ! 
object.valid()) return Change.empty(); Inspector versionFieldValue = object.field(versionField); Change change = Change.empty(); if (versionFieldValue.valid()) change = Change.of(Version.fromString(versionFieldValue.asString())); if (object.field(applicationBuildNumberField).valid()) change = change.with(applicationVersionFromSlime(object)); if (object.field(pinnedField).asBool()) change = change.withPin(); return change; } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add)); return jobStatusList; } private Optional<JobStatus> jobStatusFromSlime(Inspector object) { Optional<JobType> jobType = JobType.fromOptionalJobName(object.field(jobTypeField).asString()); if (! jobType.isPresent()) return Optional.empty(); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return Optional.of(new JobStatus(jobType.get(), jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField)), optionalLong(object.field(pausedUntilField)))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(), new Version(object.field(versionField).asString()), applicationVersionFromSlime(object.field(revisionField)), optionalString(object.field(sourceVersionField)).map(Version::fromString), Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime), object.field(reasonField).asString(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private List<RotationId> rotationListFromSlime(Inspector field) { final var rotations = new ArrayList<RotationId>(); field.traverse((ArrayTraverser) (idx, inspector) -> { final var rotation = new RotationId(inspector.asString()); rotations.add(rotation); }); return rotations; } private Optional<RotationId> legacyRotationFromSlime(Inspector field) { return field.valid() ? optionalString(field).map(RotationId::new) : Optional.empty(); } private OptionalLong optionalLong(Inspector field) { return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty(); } private OptionalInt optionalInteger(Inspector field) { return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty(); } private OptionalDouble optionalDouble(Inspector field) { return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } private Optional<Instant> optionalInstant(Inspector field) { OptionalLong value = optionalLong(field); return value.isPresent() ? Optional.of(Instant.ofEpochMilli(value.getAsLong())) : Optional.empty(); } }
The unmodifiable wrapper is not needed once `rotations` is stored as an immutable list (e.g. via `List.copyOf`).
/** Returns the rotations (global endpoints) assigned to this, as an unmodifiable view. */
public List<RotationId> rotations() {
    return Collections.unmodifiableList(rotations);
}
return Collections.unmodifiableList(rotations);
/** Returns the rotations (global endpoints) assigned to this. */
// NOTE(review): returns the backing list directly — safe only if the field is stored
// immutably (e.g. assigned via List.copyOf in the constructor); confirm before relying
// on callers not mutating it.
public List<RotationId> rotations() { return rotations; }
/**
 * An instance of an application, the controller's main domain object.
 *
 * This is immutable: all collection-typed state is defensively copied or wrapped at
 * construction, so instances can be shared freely between threads.
 */
class Application {

    private final ApplicationId id;
    private final Instant createdAt;
    private final DeploymentSpec deploymentSpec;
    private final ValidationOverrides validationOverrides;
    private final Map<ZoneId, Deployment> deployments;
    private final DeploymentJobs deploymentJobs;
    private final Change change;
    private final Change outstandingChange;
    private final Optional<IssueId> ownershipIssueId;
    private final Optional<User> owner;
    private final OptionalInt majorVersion;
    private final ApplicationMetrics metrics;
    private final Optional<String> pemDeployKey;
    private final List<RotationId> rotations;
    private final Map<HostName, RotationStatus> rotationStatus;

    /** Creates an empty application */
    public Application(ApplicationId id, Instant now) {
        this(id, now, DeploymentSpec.empty, ValidationOverrides.empty, Collections.emptyMap(),
             new DeploymentJobs(OptionalLong.empty(), Collections.emptyList(), Optional.empty(), false),
             Change.empty(), Change.empty(), Optional.empty(), Optional.empty(), OptionalInt.empty(),
             new ApplicationMetrics(0, 0), Optional.empty(), Collections.emptyList(), Collections.emptyMap());
    }

    /** Used from persistence layer: Do not use */
    public Application(ApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec,
                       ValidationOverrides validationOverrides, List<Deployment> deployments,
                       DeploymentJobs deploymentJobs, Change change, Change outstandingChange,
                       Optional<IssueId> ownershipIssueId, Optional<User> owner, OptionalInt majorVersion,
                       ApplicationMetrics metrics, Optional<String> pemDeployKey, List<RotationId> rotations,
                       Map<HostName, RotationStatus> rotationStatus) {
        this(id, createdAt, deploymentSpec, validationOverrides,
             deployments.stream().collect(Collectors.toMap(Deployment::zone, Function.identity())),
             deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
             metrics, pemDeployKey, rotations, rotationStatus);
    }

    Application(ApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec,
                ValidationOverrides validationOverrides, Map<ZoneId, Deployment> deployments,
                DeploymentJobs deploymentJobs, Change change, Change outstandingChange,
                Optional<IssueId> ownershipIssueId, Optional<User> owner, OptionalInt majorVersion,
                ApplicationMetrics metrics, Optional<String> pemDeployKey, List<RotationId> rotations,
                Map<HostName, RotationStatus> rotationStatus) {
        this.id = Objects.requireNonNull(id, "id cannot be null");
        this.createdAt = Objects.requireNonNull(createdAt, "instant of creation cannot be null");
        this.deploymentSpec = Objects.requireNonNull(deploymentSpec, "deploymentSpec cannot be null");
        this.validationOverrides = Objects.requireNonNull(validationOverrides, "validationOverrides cannot be null");
        this.deployments = ImmutableMap.copyOf(Objects.requireNonNull(deployments, "deployments cannot be null"));
        this.deploymentJobs = Objects.requireNonNull(deploymentJobs, "deploymentJobs cannot be null");
        this.change = Objects.requireNonNull(change, "change cannot be null");
        this.outstandingChange = Objects.requireNonNull(outstandingChange, "outstandingChange cannot be null");
        this.ownershipIssueId = Objects.requireNonNull(ownershipIssueId, "ownershipIssueId cannot be null");
        this.owner = Objects.requireNonNull(owner, "owner cannot be null");
        this.majorVersion = Objects.requireNonNull(majorVersion, "majorVersion cannot be null");
        this.metrics = Objects.requireNonNull(metrics, "metrics cannot be null");
        this.pemDeployKey = pemDeployKey;
        // Defensive, immutable copy: the caller's list must not be able to mutate our state
        this.rotations = List.copyOf(Objects.requireNonNull(rotations, "rotations cannot be null"));
        this.rotationStatus = ImmutableMap.copyOf(Objects.requireNonNull(rotationStatus, "rotationStatus cannot be null"));
    }

    public ApplicationId id() { return id; }

    public Instant createdAt() { return createdAt; }

    /**
     * Returns the last deployed deployment spec of this application,
     * or the empty deployment spec if it has never been deployed
     */
    public DeploymentSpec deploymentSpec() { return deploymentSpec; }

    /**
     * Returns the last deployed validation overrides of this application,
     * or the empty validation overrides if it has never been deployed
     * (or was deployed with an empty/missing validation overrides)
     */
    public ValidationOverrides validationOverrides() { return validationOverrides; }

    /** Returns an immutable map of the current deployments of this */
    public Map<ZoneId, Deployment> deployments() { return deployments; }

    /**
     * Returns an immutable map of the current *production* deployments of this
     * (deployments also includes manually deployed environments)
     */
    public Map<ZoneId, Deployment> productionDeployments() {
        return ImmutableMap.copyOf(deployments.values().stream()
                                              .filter(deployment -> deployment.zone().environment() == Environment.prod)
                                              .collect(Collectors.toMap(Deployment::zone, Function.identity())));
    }

    public DeploymentJobs deploymentJobs() { return deploymentJobs; }

    /**
     * Returns base change for this application, i.e., the change that is deployed outside block windows.
     * This is empty when no change is currently under deployment.
     */
    public Change change() { return change; }

    /**
     * Returns whether this has an outstanding change (in the source repository), which
     * has currently not started deploying (because a deployment is (or was) already in progress
     */
    public Change outstandingChange() { return outstandingChange; }

    /** Returns ID of the last ownership issue filed for this */
    public Optional<IssueId> ownershipIssueId() { return ownershipIssueId; }

    public Optional<User> owner() { return owner; }

    /**
     * Overrides the system major version for this application. This override takes effect if the deployment
     * spec does not specify a major version.
     */
    public OptionalInt majorVersion() { return majorVersion; }

    /** Returns metrics for this */
    public ApplicationMetrics metrics() { return metrics; }

    /** Returns activity for this */
    public ApplicationActivity activity() { return ApplicationActivity.from(deployments.values()); }

    /**
     * Returns the oldest platform version this has deployed in a permanent zone (not test or staging).
     *
     * This is unfortunately quite similar to the corresponding check in {@code ApplicationController},
     * but this checks only what the controller has deployed to the production zones, while that checks the node
     * repository to see what's actually installed on each node. Thus, this is the right choice for, e.g., target
     * Vespa versions for new deployments, while that is the right choice for version to compile against.
     */
    public Optional<Version> oldestDeployedPlatform() {
        return productionDeployments().values().stream()
                                      .map(Deployment::version)
                                      .min(Comparator.naturalOrder());
    }

    /**
     * Returns the oldest application version this has deployed in a permanent zone (not test or staging).
     */
    public Optional<ApplicationVersion> oldestDeployedApplication() {
        return productionDeployments().values().stream()
                                      .map(Deployment::applicationVersion)
                                      .min(Comparator.naturalOrder());
    }

    /** Returns the default global endpoints for this in given system */
    public EndpointList endpointsIn(SystemName system) {
        if (rotations.isEmpty()) return EndpointList.EMPTY;
        return EndpointList.defaultGlobal(id, system);
    }

    public Optional<String> pemDeployKey() { return pemDeployKey; }

    /** Returns the status of the global rotation assigned to this. Will be empty if this does not have a global rotation. */
    public Map<HostName, RotationStatus> rotationStatus() { return rotationStatus; }

    /** Returns the global rotation status of given deployment */
    public RotationStatus rotationStatus(Deployment deployment) {
        return rotationStatus.entrySet().stream()
                             .filter(kv -> kv.getKey().value().contains(deployment.zone().value()))
                             .map(Map.Entry::getValue)
                             .findFirst()
                             .orElse(RotationStatus.unknown);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (! (o instanceof Application)) return false;
        Application that = (Application) o;
        // Identity-based equality: two snapshots of the same application are considered equal
        return id.equals(that.id);
    }

    @Override
    public int hashCode() {
        return id.hashCode();
    }

    @Override
    public String toString() {
        return "application '" + id + "'";
    }

}
/**
 * A tenant application: identity, deployment spec, current deployments, job status,
 * in-flight changes and global-rotation state.
 *
 * Instances are immutable: all collection-typed fields are defensively copied into
 * immutable views in the canonical constructor.
 */
class Application {

    private final ApplicationId id;
    private final Instant createdAt;
    private final DeploymentSpec deploymentSpec;
    private final ValidationOverrides validationOverrides;
    private final Map<ZoneId, Deployment> deployments;
    private final DeploymentJobs deploymentJobs;
    private final Change change;
    private final Change outstandingChange;
    private final Optional<IssueId> ownershipIssueId;
    private final Optional<User> owner;
    private final OptionalInt majorVersion;
    private final ApplicationMetrics metrics;
    private final Optional<String> pemDeployKey;
    private final List<RotationId> rotations;
    private final Map<HostName, RotationStatus> rotationStatus;

    /** Creates an empty application */
    public Application(ApplicationId id, Instant now) {
        this(id, now, DeploymentSpec.empty, ValidationOverrides.empty, Collections.emptyMap(),
             new DeploymentJobs(OptionalLong.empty(), Collections.emptyList(), Optional.empty(), false),
             Change.empty(), Change.empty(), Optional.empty(), Optional.empty(), OptionalInt.empty(),
             new ApplicationMetrics(0, 0), Optional.empty(), Collections.emptyList(), Collections.emptyMap());
    }

    /** Used from persistence layer: Do not use */
    public Application(ApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec,
                       ValidationOverrides validationOverrides, List<Deployment> deployments,
                       DeploymentJobs deploymentJobs, Change change, Change outstandingChange,
                       Optional<IssueId> ownershipIssueId, Optional<User> owner, OptionalInt majorVersion,
                       ApplicationMetrics metrics, Optional<String> pemDeployKey, List<RotationId> rotations,
                       Map<HostName, RotationStatus> rotationStatus) {
        // Re-key the deployment list by zone before delegating to the canonical constructor.
        this(id, createdAt, deploymentSpec, validationOverrides,
             deployments.stream().collect(Collectors.toMap(Deployment::zone, Function.identity())),
             deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
             metrics, pemDeployKey, rotations, rotationStatus);
    }

    Application(ApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec,
                ValidationOverrides validationOverrides, Map<ZoneId, Deployment> deployments,
                DeploymentJobs deploymentJobs, Change change, Change outstandingChange,
                Optional<IssueId> ownershipIssueId, Optional<User> owner, OptionalInt majorVersion,
                ApplicationMetrics metrics, Optional<String> pemDeployKey, List<RotationId> rotations,
                Map<HostName, RotationStatus> rotationStatus) {
        this.id = Objects.requireNonNull(id, "id cannot be null");
        this.createdAt = Objects.requireNonNull(createdAt, "instant of creation cannot be null");
        this.deploymentSpec = Objects.requireNonNull(deploymentSpec, "deploymentSpec cannot be null");
        this.validationOverrides = Objects.requireNonNull(validationOverrides, "validationOverrides cannot be null");
        this.deployments = ImmutableMap.copyOf(Objects.requireNonNull(deployments, "deployments cannot be null"));
        this.deploymentJobs = Objects.requireNonNull(deploymentJobs, "deploymentJobs cannot be null");
        this.change = Objects.requireNonNull(change, "change cannot be null");
        this.outstandingChange = Objects.requireNonNull(outstandingChange, "outstandingChange cannot be null");
        this.ownershipIssueId = Objects.requireNonNull(ownershipIssueId, "ownershipIssueId cannot be null");
        this.owner = Objects.requireNonNull(owner, "owner cannot be null");
        this.majorVersion = Objects.requireNonNull(majorVersion, "majorVersion cannot be null");
        this.metrics = Objects.requireNonNull(metrics, "metrics cannot be null");
        // Fail fast on null here like every other field: callers must pass Optional.empty(), not null.
        this.pemDeployKey = Objects.requireNonNull(pemDeployKey, "pemDeployKey cannot be null");
        this.rotations = List.copyOf(Objects.requireNonNull(rotations, "rotations cannot be null"));
        this.rotationStatus = ImmutableMap.copyOf(Objects.requireNonNull(rotationStatus, "rotationStatus cannot be null"));
    }

    public ApplicationId id() { return id; }

    public Instant createdAt() { return createdAt; }

    /**
     * Returns the last deployed deployment spec of this application,
     * or the empty deployment spec if it has never been deployed
     */
    public DeploymentSpec deploymentSpec() { return deploymentSpec; }

    /**
     * Returns the last deployed validation overrides of this application,
     * or the empty validation overrides if it has never been deployed
     * (or was deployed with an empty/missing validation overrides)
     */
    public ValidationOverrides validationOverrides() { return validationOverrides; }

    /** Returns an immutable map of the current deployments of this */
    public Map<ZoneId, Deployment> deployments() { return deployments; }

    /**
     * Returns an immutable map of the current *production* deployments of this
     * (deployments also includes manually deployed environments)
     */
    public Map<ZoneId, Deployment> productionDeployments() {
        return ImmutableMap.copyOf(deployments.values().stream()
                                           .filter(deployment -> deployment.zone().environment() == Environment.prod)
                                           .collect(Collectors.toMap(Deployment::zone, Function.identity())));
    }

    public DeploymentJobs deploymentJobs() { return deploymentJobs; }

    /**
     * Returns base change for this application, i.e., the change that is deployed outside block windows.
     * This is empty when no change is currently under deployment.
     */
    public Change change() { return change; }

    /**
     * Returns whether this has an outstanding change (in the source repository), which
     * has currently not started deploying (because a deployment is (or was) already in progress
     */
    public Change outstandingChange() { return outstandingChange; }

    /** Returns ID of the last ownership issue filed for this */
    public Optional<IssueId> ownershipIssueId() { return ownershipIssueId; }

    public Optional<User> owner() { return owner; }

    /**
     * Overrides the system major version for this application. This override takes effect if the deployment
     * spec does not specify a major version.
     */
    public OptionalInt majorVersion() { return majorVersion; }

    /** Returns metrics for this */
    public ApplicationMetrics metrics() { return metrics; }

    /** Returns activity for this */
    public ApplicationActivity activity() { return ApplicationActivity.from(deployments.values()); }

    /**
     * Returns the oldest platform version this has deployed in a permanent zone (not test or staging).
     *
     * This is unfortunately quite similar to {@link ApplicationController},
     * but this checks only what the controller has deployed to the production zones, while that checks the node repository
     * to see what's actually installed on each node. Thus, this is the right choice for, e.g., target Vespa versions for
     * new deployments, while that is the right choice for version to compile against.
     */
    public Optional<Version> oldestDeployedPlatform() {
        return productionDeployments().values().stream()
                                      .map(Deployment::version)
                                      .min(Comparator.naturalOrder());
    }

    /**
     * Returns the oldest application version this has deployed in a permanent zone (not test or staging).
     */
    public Optional<ApplicationVersion> oldestDeployedApplication() {
        return productionDeployments().values().stream()
                                      .map(Deployment::applicationVersion)
                                      .min(Comparator.naturalOrder());
    }

    /** Returns the default global endpoints for this in given system */
    public EndpointList endpointsIn(SystemName system) {
        if (rotations.isEmpty()) return EndpointList.EMPTY;
        return EndpointList.defaultGlobal(id, system);
    }

    public Optional<String> pemDeployKey() { return pemDeployKey; }

    /** Returns the status of the global rotation assigned to this. Will be empty if this does not have a global rotation. */
    public Map<HostName, RotationStatus> rotationStatus() { return rotationStatus; }

    /** Returns the global rotation status of given deployment */
    public RotationStatus rotationStatus(Deployment deployment) {
        // Matched by substring because the rotation host name embeds the zone id.
        return rotationStatus.entrySet().stream()
                             .filter(kv -> kv.getKey().value().contains(deployment.zone().value()))
                             .map(Map.Entry::getValue)
                             .findFirst()
                             .orElse(RotationStatus.unknown);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (! (o instanceof Application)) return false;
        Application that = (Application) o;
        return id.equals(that.id);
    }

    @Override
    public int hashCode() { return id.hashCode(); }

    @Override
    public String toString() { return "application '" + id + "'"; }

}
Consider using `field.traverse(ArrayTraverser)` instead of iterating over the array entries by index.
private List<RotationId> rotationListFromSlime(Inspector field) { final var rotations = new ArrayList<RotationId>(); for (int i = 0; i < field.entries(); ++i) { var entry = field.entry(i); rotations.add(new RotationId(entry.asString())); } return rotations; }
for (int i = 0; i < field.entries(); ++i) {
private List<RotationId> rotationListFromSlime(Inspector field) { final var rotations = new ArrayList<RotationId>(); field.traverse((ArrayTraverser) (idx, inspector) -> { final var rotation = new RotationId(inspector.asString()); rotations.add(rotation); }); return rotations; }
class ApplicationSerializer { private final String idField = "id"; private final String createdAtField = "createdAt"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String pinnedField = "pinned"; private final String outstandingChangeField = "outstandingChangeField"; private final String ownershipIssueIdField = "ownershipIssueId"; private final String ownerField = "confirmedOwner"; private final String majorVersionField = "majorVersion"; private final String writeQualityField = "writeQuality"; private final String queryQualityField = "queryQuality"; private final String pemDeployKeyField = "pemDeployKey"; private final String rotationsField = "rotations"; private final String deprecatedRotationField = "rotation"; private final String rotationStatusField = "rotationStatus"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationBuildNumberField = "applicationBuildNumber"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String authorEmailField = "authorEmailField"; private final String compileVersionField = "compileVersion"; private final String buildTimeField = "buildTime"; private final String lastQueriedField = "lastQueried"; private final String lastWrittenField = "lastWritten"; private final String lastQueriesPerSecondField = "lastQueriesPerSecond"; private 
final String lastWritesPerSecondField = "lastWritesPerSecond"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String issueIdField = "jiraIssueId"; private final String builtInternallyField = "builtInternally"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String pausedUntilField = "pausedUntil"; private final String jobRunIdField = "id"; private final String versionField = "version"; private final String revisionField = "revision"; private final String sourceVersionField = "sourceVersion"; private final String sourceApplicationField = "sourceRevision"; private final String reasonField = "reason"; private final String atField = "at"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField = "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = "diskbusy"; private final String deploymentMetricsField = "metrics"; private final String deploymentMetricsQPSField = "queriesPerSecond"; private final String deploymentMetricsWPSField = "writesPerSecond"; private final String deploymentMetricsDocsField = "documentCount"; private 
final String deploymentMetricsQueryLatencyField = "queryLatencyMillis"; private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis"; private final String deploymentMetricsUpdateTime = "lastUpdated"; private final String deploymentMetricsWarningsField = "warnings"; public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setLong(createdAtField, application.createdAt().toEpochMilli()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.change(), root, deployingField); toSlime(application.outstandingChange(), root, outstandingChangeField); application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value())); application.owner().ifPresent(owner -> root.setString(ownerField, owner.username())); application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion)); root.setDouble(queryQualityField, application.metrics().queryServiceQuality()); root.setDouble(writeQualityField, application.metrics().writeServiceQuality()); application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey)); Cursor rotations = root.setArray(rotationsField); application.rotations().forEach(rotation -> rotations.addString(rotation.asString())); toSlime(application.rotationStatus(), root.setArray(rotationStatusField)); return slime; } private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, 
Cursor object) { zoneIdToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); deploymentMetricsToSlime(deployment.metrics(), object); deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value)); } private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) { Cursor root = object.setObject(deploymentMetricsField); root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond()); root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond()); root.setDouble(deploymentMetricsDocsField, metrics.documentCount()); root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis()); root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli())); if (!metrics.warnings().isEmpty()) { Cursor warningsObject = root.setObject(deploymentMetricsWarningsField); metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count)); } } private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { 
toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneIdToSlime(ZoneId zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if (applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent()) { object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong()); toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField)); applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString())); 
applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli())); } } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.issueId().ifPresent(jiraIssueId -> cursor.setString(issueIdField, jiraIssueId.value())); cursor.setBool(builtInternallyField, deploymentJobs.deployedInternally()); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().jobName()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField)); jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField)); jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField)); jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, object, firstFailingField)); jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until)); } private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) { Cursor object = parent.setObject(jobRunObjectName); object.setLong(jobRunIdField, jobRun.id()); object.setString(versionField, jobRun.platform().toString()); toSlime(jobRun.application(), object.setObject(revisionField)); jobRun.sourcePlatform().ifPresent(version -> 
object.setString(sourceVersionField, version.toString())); jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField))); object.setString(reasonField, jobRun.reason()); object.setLong(atField, jobRun.at().toEpochMilli()); } private void toSlime(Change deploying, Cursor parentObject, String fieldName) { if (deploying.isEmpty()) return; Cursor object = parentObject.setObject(fieldName); if (deploying.platform().isPresent()) object.setString(versionField, deploying.platform().get().toString()); if (deploying.application().isPresent()) toSlime(deploying.application().get(), object); if (deploying.isPinned()) object.setBool(pinnedField, true); } private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) { rotationStatus.forEach((hostname, status) -> { Cursor object = array.addObject(); object.setString("hostname", hostname.value()); object.setString("status", status.name()); }); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false); ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField)); Change deploying = changeFromSlime(root.field(deployingField)); Change outstandingChange = changeFromSlime(root.field(outstandingChangeField)); Optional<IssueId> ownershipIssueId = optionalString(root.field(ownershipIssueIdField)).map(IssueId::from); Optional<User> owner = optionalString(root.field(ownerField)).map(User::from); OptionalInt majorVersion = 
optionalInteger(root.field(majorVersionField)); ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(), root.field(writeQualityField).asDouble()); Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField)); List<RotationId> rotations = rotationsFromSlime(root); Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField)); return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)), applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)), deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)), DeploymentActivity.create(optionalInstant(deploymentObject.field(lastQueriedField)), optionalInstant(deploymentObject.field(lastWrittenField)), optionalDouble(deploymentObject.field(lastQueriesPerSecondField)), optionalDouble(deploymentObject.field(lastWritesPerSecondField)))); } private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) { Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ? 
Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) : Optional.empty(); return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(), object.field(deploymentMetricsWPSField).asDouble(), object.field(deploymentMetricsDocsField).asDouble(), object.field(deploymentMetricsQueryLatencyField).asDouble(), object.field(deploymentMetricsWriteLatencyField).asDouble(), instant, deploymentWarningsFrom(object.field(deploymentMetricsWarningsField))); } private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) { Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name), (int) value.asLong())); return Collections.unmodifiableMap(warnings); } private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) { if (!object.valid()) { return Collections.emptyMap(); } Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); object.traverse((ArrayTraverser) (idx, inspect) -> { HostName hostname = HostName.from(inspect.field("hostname").asString()); RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString()); rotationStatus.put(hostname, status); }); return Collections.unmodifiableMap(rotationStatus); } private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = 
object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private ClusterInfo clusterInfoFromSlime(Inspector inspector) { String flavor = inspector.field(clusterInfoFlavorField).asString(); int cost = (int)inspector.field(clusterInfoCostField).asLong(); String type = inspector.field(clusterInfoTypeField).asString(); double flavorCpu = inspector.field(clusterInfoCpuField).asDouble(); double flavorMem = inspector.field(clusterInfoMemField).asDouble(); double flavorDisk = inspector.field(clusterInfoDiskField).asDouble(); List<String> hostnames = new ArrayList<>(); inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString())); return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames); } private ZoneId zoneIdFromSlime(Inspector object) { return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString()); } private ApplicationVersion applicationVersionFromSlime(Inspector object) { if ( ! object.valid()) return ApplicationVersion.unknown; OptionalLong applicationBuildNumber = optionalLong(object.field(applicationBuildNumberField)); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); if ( ! sourceRevision.isPresent() || ! applicationBuildNumber.isPresent()) { return ApplicationVersion.unknown; } Optional<String> authorEmail = optionalString(object.field(authorEmailField)); Optional<Version> compileVersion = optionalString(object.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = optionalInstant(object.field(buildTimeField)); if ( ! 
authorEmail.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong()); if ( ! compileVersion.isPresent() || ! buildTime.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get()); return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(), compileVersion.get(), buildTime.get()); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { OptionalLong projectId = optionalLong(object.field(projectIdField)); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<IssueId> issueId = optionalString(object.field(issueIdField)).map(IssueId::from); boolean builtInternally = object.field(builtInternallyField).asBool(); return new DeploymentJobs(projectId, jobStatusList, issueId, builtInternally); } private Change changeFromSlime(Inspector object) { if ( ! 
object.valid()) return Change.empty(); Inspector versionFieldValue = object.field(versionField); Change change = Change.empty(); if (versionFieldValue.valid()) change = Change.of(Version.fromString(versionFieldValue.asString())); if (object.field(applicationBuildNumberField).valid()) change = change.with(applicationVersionFromSlime(object)); if (object.field(pinnedField).asBool()) change = change.withPin(); return change; } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add)); return jobStatusList; } private Optional<JobStatus> jobStatusFromSlime(Inspector object) { Optional<JobType> jobType = JobType.fromOptionalJobName(object.field(jobTypeField).asString()); if (! jobType.isPresent()) return Optional.empty(); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return Optional.of(new JobStatus(jobType.get(), jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField)), optionalLong(object.field(pausedUntilField)))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(), new Version(object.field(versionField).asString()), applicationVersionFromSlime(object.field(revisionField)), optionalString(object.field(sourceVersionField)).map(Version::fromString), Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime), object.field(reasonField).asString(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private List<RotationId> rotationsFromSlime(Inspector root) { final var rotations = rotationListFromSlime(root.field(rotationsField)); final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField)); legacyRotation.ifPresent(rotations::add); return rotations; } private Optional<RotationId> legacyRotationFromSlime(Inspector field) { return field.valid() ? optionalString(field).map(RotationId::new) : Optional.empty(); } private OptionalLong optionalLong(Inspector field) { return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty(); } private OptionalInt optionalInteger(Inspector field) { return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty(); } private OptionalDouble optionalDouble(Inspector field) { return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } private Optional<Instant> optionalInstant(Inspector field) { OptionalLong value = optionalLong(field); return value.isPresent() ? Optional.of(Instant.ofEpochMilli(value.getAsLong())) : Optional.empty(); } }
class ApplicationSerializer { private final String idField = "id"; private final String createdAtField = "createdAt"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String pinnedField = "pinned"; private final String outstandingChangeField = "outstandingChangeField"; private final String ownershipIssueIdField = "ownershipIssueId"; private final String ownerField = "confirmedOwner"; private final String majorVersionField = "majorVersion"; private final String writeQualityField = "writeQuality"; private final String queryQualityField = "queryQuality"; private final String pemDeployKeyField = "pemDeployKey"; private final String rotationsField = "endpoints"; private final String deprecatedRotationField = "rotation"; private final String rotationStatusField = "rotationStatus"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationBuildNumberField = "applicationBuildNumber"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String authorEmailField = "authorEmailField"; private final String compileVersionField = "compileVersion"; private final String buildTimeField = "buildTime"; private final String lastQueriedField = "lastQueried"; private final String lastWrittenField = "lastWritten"; private final String lastQueriesPerSecondField = "lastQueriesPerSecond"; private 
final String lastWritesPerSecondField = "lastWritesPerSecond"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String issueIdField = "jiraIssueId"; private final String builtInternallyField = "builtInternally"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String pausedUntilField = "pausedUntil"; private final String jobRunIdField = "id"; private final String versionField = "version"; private final String revisionField = "revision"; private final String sourceVersionField = "sourceVersion"; private final String sourceApplicationField = "sourceRevision"; private final String reasonField = "reason"; private final String atField = "at"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField = "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = "diskbusy"; private final String deploymentMetricsField = "metrics"; private final String deploymentMetricsQPSField = "queriesPerSecond"; private final String deploymentMetricsWPSField = "writesPerSecond"; private final String deploymentMetricsDocsField = "documentCount"; private 
final String deploymentMetricsQueryLatencyField = "queryLatencyMillis"; private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis"; private final String deploymentMetricsUpdateTime = "lastUpdated"; private final String deploymentMetricsWarningsField = "warnings"; public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setLong(createdAtField, application.createdAt().toEpochMilli()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.change(), root, deployingField); toSlime(application.outstandingChange(), root, outstandingChangeField); application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value())); application.owner().ifPresent(owner -> root.setString(ownerField, owner.username())); application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion)); root.setDouble(queryQualityField, application.metrics().queryServiceQuality()); root.setDouble(writeQualityField, application.metrics().writeServiceQuality()); application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey)); Cursor rotations = root.setArray(rotationsField); application.rotations().forEach(rotation -> rotations.addString(rotation.asString())); toSlime(application.rotationStatus(), root.setArray(rotationStatusField)); return slime; } private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, 
Cursor object) { zoneIdToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); deploymentMetricsToSlime(deployment.metrics(), object); deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value)); } private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) { Cursor root = object.setObject(deploymentMetricsField); root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond()); root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond()); root.setDouble(deploymentMetricsDocsField, metrics.documentCount()); root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis()); root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli())); if (!metrics.warnings().isEmpty()) { Cursor warningsObject = root.setObject(deploymentMetricsWarningsField); metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count)); } } private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { 
toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneIdToSlime(ZoneId zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if (applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent()) { object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong()); toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField)); applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString())); 
applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli())); } } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.issueId().ifPresent(jiraIssueId -> cursor.setString(issueIdField, jiraIssueId.value())); cursor.setBool(builtInternallyField, deploymentJobs.deployedInternally()); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().jobName()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField)); jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField)); jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField)); jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, object, firstFailingField)); jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until)); } private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) { Cursor object = parent.setObject(jobRunObjectName); object.setLong(jobRunIdField, jobRun.id()); object.setString(versionField, jobRun.platform().toString()); toSlime(jobRun.application(), object.setObject(revisionField)); jobRun.sourcePlatform().ifPresent(version -> 
object.setString(sourceVersionField, version.toString())); jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField))); object.setString(reasonField, jobRun.reason()); object.setLong(atField, jobRun.at().toEpochMilli()); } private void toSlime(Change deploying, Cursor parentObject, String fieldName) { if (deploying.isEmpty()) return; Cursor object = parentObject.setObject(fieldName); if (deploying.platform().isPresent()) object.setString(versionField, deploying.platform().get().toString()); if (deploying.application().isPresent()) toSlime(deploying.application().get(), object); if (deploying.isPinned()) object.setBool(pinnedField, true); } private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) { rotationStatus.forEach((hostname, status) -> { Cursor object = array.addObject(); object.setString("hostname", hostname.value()); object.setString("status", status.name()); }); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false); ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField)); Change deploying = changeFromSlime(root.field(deployingField)); Change outstandingChange = changeFromSlime(root.field(outstandingChangeField)); Optional<IssueId> ownershipIssueId = optionalString(root.field(ownershipIssueIdField)).map(IssueId::from); Optional<User> owner = optionalString(root.field(ownerField)).map(User::from); OptionalInt majorVersion = 
optionalInteger(root.field(majorVersionField)); ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(), root.field(writeQualityField).asDouble()); Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField)); List<RotationId> rotations = rotationsFromSlime(root); Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField)); return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)), applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)), deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)), DeploymentActivity.create(optionalInstant(deploymentObject.field(lastQueriedField)), optionalInstant(deploymentObject.field(lastWrittenField)), optionalDouble(deploymentObject.field(lastQueriesPerSecondField)), optionalDouble(deploymentObject.field(lastWritesPerSecondField)))); } private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) { Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ? 
Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) : Optional.empty(); return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(), object.field(deploymentMetricsWPSField).asDouble(), object.field(deploymentMetricsDocsField).asDouble(), object.field(deploymentMetricsQueryLatencyField).asDouble(), object.field(deploymentMetricsWriteLatencyField).asDouble(), instant, deploymentWarningsFrom(object.field(deploymentMetricsWarningsField))); } private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) { Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name), (int) value.asLong())); return Collections.unmodifiableMap(warnings); } private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) { if (!object.valid()) { return Collections.emptyMap(); } Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); object.traverse((ArrayTraverser) (idx, inspect) -> { HostName hostname = HostName.from(inspect.field("hostname").asString()); RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString()); rotationStatus.put(hostname, status); }); return Collections.unmodifiableMap(rotationStatus); } private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = 
object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private ClusterInfo clusterInfoFromSlime(Inspector inspector) { String flavor = inspector.field(clusterInfoFlavorField).asString(); int cost = (int)inspector.field(clusterInfoCostField).asLong(); String type = inspector.field(clusterInfoTypeField).asString(); double flavorCpu = inspector.field(clusterInfoCpuField).asDouble(); double flavorMem = inspector.field(clusterInfoMemField).asDouble(); double flavorDisk = inspector.field(clusterInfoDiskField).asDouble(); List<String> hostnames = new ArrayList<>(); inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString())); return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames); } private ZoneId zoneIdFromSlime(Inspector object) { return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString()); } private ApplicationVersion applicationVersionFromSlime(Inspector object) { if ( ! object.valid()) return ApplicationVersion.unknown; OptionalLong applicationBuildNumber = optionalLong(object.field(applicationBuildNumberField)); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); if ( ! sourceRevision.isPresent() || ! applicationBuildNumber.isPresent()) { return ApplicationVersion.unknown; } Optional<String> authorEmail = optionalString(object.field(authorEmailField)); Optional<Version> compileVersion = optionalString(object.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = optionalInstant(object.field(buildTimeField)); if ( ! 
authorEmail.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong()); if ( ! compileVersion.isPresent() || ! buildTime.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get()); return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(), compileVersion.get(), buildTime.get()); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { OptionalLong projectId = optionalLong(object.field(projectIdField)); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<IssueId> issueId = optionalString(object.field(issueIdField)).map(IssueId::from); boolean builtInternally = object.field(builtInternallyField).asBool(); return new DeploymentJobs(projectId, jobStatusList, issueId, builtInternally); } private Change changeFromSlime(Inspector object) { if ( ! 
object.valid()) return Change.empty(); Inspector versionFieldValue = object.field(versionField); Change change = Change.empty(); if (versionFieldValue.valid()) change = Change.of(Version.fromString(versionFieldValue.asString())); if (object.field(applicationBuildNumberField).valid()) change = change.with(applicationVersionFromSlime(object)); if (object.field(pinnedField).asBool()) change = change.withPin(); return change; } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add)); return jobStatusList; } private Optional<JobStatus> jobStatusFromSlime(Inspector object) { Optional<JobType> jobType = JobType.fromOptionalJobName(object.field(jobTypeField).asString()); if (! jobType.isPresent()) return Optional.empty(); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return Optional.of(new JobStatus(jobType.get(), jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField)), optionalLong(object.field(pausedUntilField)))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(), new Version(object.field(versionField).asString()), applicationVersionFromSlime(object.field(revisionField)), optionalString(object.field(sourceVersionField)).map(Version::fromString), Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime), object.field(reasonField).asString(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private List<RotationId> rotationsFromSlime(Inspector root) { final var rotations = rotationListFromSlime(root.field(rotationsField)); final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField)); legacyRotation.ifPresent(rotations::add); return rotations; } private Optional<RotationId> legacyRotationFromSlime(Inspector field) { return field.valid() ? optionalString(field).map(RotationId::new) : Optional.empty(); } private OptionalLong optionalLong(Inspector field) { return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty(); } private OptionalInt optionalInteger(Inspector field) { return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty(); } private OptionalDouble optionalDouble(Inspector field) { return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } private Optional<Instant> optionalInstant(Inspector field) { OptionalLong value = optionalLong(field); return value.isPresent() ? Optional.of(Instant.ofEpochMilli(value.getAsLong())) : Optional.empty(); } }
Done.
/** Collects every rotation id: those in the current array field, plus the deprecated single-rotation field if set. */
private List<RotationId> rotationsFromSlime(Inspector root) {
    List<RotationId> result = rotationListFromSlime(root.field(rotationsField));
    legacyRotationFromSlime(root.field(deprecatedRotationField)).ifPresent(result::add);
    return result;
}
final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField));
// Reads rotation ids from the current array field, then appends the value of the deprecated
// single-rotation field when present, for backwards compatibility with older serialized data.
// NOTE(review): mutates the list returned by rotationListFromSlime — that helper must return a
// mutable list; confirm against its definition.
private List<RotationId> rotationsFromSlime(Inspector root) { final var rotations = rotationListFromSlime(root.field(rotationsField)); final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField)); legacyRotation.ifPresent(rotations::add); return rotations; }
/**
 * Serializes {@link Application} instances to and from the Slime binary/JSON format
 * used by the controller's persistent store.
 * <p>
 * The {@code *Field} constants below are the wire names of every persisted field;
 * changing any of them changes the stored format, so they must stay stable across
 * versions (note that some legacy names deliberately carry a literal "Field" suffix,
 * e.g. {@code repositoryField = "repositoryField"}, and that
 * {@code sourceApplicationField} reuses the value "sourceRevision" — both appear to
 * be historical wire-format artifacts; do not "fix" them without a data migration).
 * <p>
 * {@code toSlime(Application)} writes an application and all of its deployments,
 * deployment jobs, cluster info/utilization, metrics, changes and rotation status;
 * {@code fromSlime(Slime)} reads the same structure back, treating absent optional
 * fields as empty. Unknown job types and invalid sub-objects are skipped rather than
 * failing the whole read (see {@code jobStatusFromSlime}/{@code jobRunFromSlime}).
 */
class ApplicationSerializer { private final String idField = "id"; private final String createdAtField = "createdAt"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String pinnedField = "pinned"; private final String outstandingChangeField = "outstandingChangeField"; private final String ownershipIssueIdField = "ownershipIssueId"; private final String ownerField = "confirmedOwner"; private final String majorVersionField = "majorVersion"; private final String writeQualityField = "writeQuality"; private final String queryQualityField = "queryQuality"; private final String pemDeployKeyField = "pemDeployKey"; private final String rotationsField = "rotations"; private final String deprecatedRotationField = "rotation"; private final String rotationStatusField = "rotationStatus"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationBuildNumberField = "applicationBuildNumber"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String authorEmailField = "authorEmailField"; private final String compileVersionField = "compileVersion"; private final String buildTimeField = "buildTime"; private final String lastQueriedField = "lastQueried"; private final String lastWrittenField = "lastWritten"; private final String lastQueriesPerSecondField = "lastQueriesPerSecond"; private 
final String lastWritesPerSecondField = "lastWritesPerSecond"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String issueIdField = "jiraIssueId"; private final String builtInternallyField = "builtInternally"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String pausedUntilField = "pausedUntil"; private final String jobRunIdField = "id"; private final String versionField = "version"; private final String revisionField = "revision"; private final String sourceVersionField = "sourceVersion"; private final String sourceApplicationField = "sourceRevision"; private final String reasonField = "reason"; private final String atField = "at"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField = "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = "diskbusy"; private final String deploymentMetricsField = "metrics"; private final String deploymentMetricsQPSField = "queriesPerSecond"; private final String deploymentMetricsWPSField = "writesPerSecond"; private final String deploymentMetricsDocsField = "documentCount"; private 
final String deploymentMetricsQueryLatencyField = "queryLatencyMillis"; private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis"; private final String deploymentMetricsUpdateTime = "lastUpdated"; private final String deploymentMetricsWarningsField = "warnings"; public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setLong(createdAtField, application.createdAt().toEpochMilli()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.change(), root, deployingField); toSlime(application.outstandingChange(), root, outstandingChangeField); application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value())); application.owner().ifPresent(owner -> root.setString(ownerField, owner.username())); application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion)); root.setDouble(queryQualityField, application.metrics().queryServiceQuality()); root.setDouble(writeQualityField, application.metrics().writeServiceQuality()); application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey)); Cursor rotations = root.setArray(rotationsField); application.rotations().forEach(rotation -> rotations.addString(rotation.asString())); toSlime(application.rotationStatus(), root.setArray(rotationStatusField)); return slime; } private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, 
Cursor object) { zoneIdToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); deploymentMetricsToSlime(deployment.metrics(), object); deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value)); } private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) { Cursor root = object.setObject(deploymentMetricsField); root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond()); root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond()); root.setDouble(deploymentMetricsDocsField, metrics.documentCount()); root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis()); root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli())); if (!metrics.warnings().isEmpty()) { Cursor warningsObject = root.setObject(deploymentMetricsWarningsField); metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count)); } } private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { 
toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneIdToSlime(ZoneId zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if (applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent()) { object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong()); toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField)); applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString())); 
applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli())); } } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.issueId().ifPresent(jiraIssueId -> cursor.setString(issueIdField, jiraIssueId.value())); cursor.setBool(builtInternallyField, deploymentJobs.deployedInternally()); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().jobName()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField)); jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField)); jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField)); jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, object, firstFailingField)); jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until)); } private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) { Cursor object = parent.setObject(jobRunObjectName); object.setLong(jobRunIdField, jobRun.id()); object.setString(versionField, jobRun.platform().toString()); toSlime(jobRun.application(), object.setObject(revisionField)); jobRun.sourcePlatform().ifPresent(version -> 
object.setString(sourceVersionField, version.toString())); jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField))); object.setString(reasonField, jobRun.reason()); object.setLong(atField, jobRun.at().toEpochMilli()); } private void toSlime(Change deploying, Cursor parentObject, String fieldName) { if (deploying.isEmpty()) return; Cursor object = parentObject.setObject(fieldName); if (deploying.platform().isPresent()) object.setString(versionField, deploying.platform().get().toString()); if (deploying.application().isPresent()) toSlime(deploying.application().get(), object); if (deploying.isPinned()) object.setBool(pinnedField, true); } private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) { rotationStatus.forEach((hostname, status) -> { Cursor object = array.addObject(); object.setString("hostname", hostname.value()); object.setString("status", status.name()); }); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false); ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField)); Change deploying = changeFromSlime(root.field(deployingField)); Change outstandingChange = changeFromSlime(root.field(outstandingChangeField)); Optional<IssueId> ownershipIssueId = optionalString(root.field(ownershipIssueIdField)).map(IssueId::from); Optional<User> owner = optionalString(root.field(ownerField)).map(User::from); OptionalInt majorVersion = 
optionalInteger(root.field(majorVersionField)); ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(), root.field(writeQualityField).asDouble()); Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField)); List<RotationId> rotations = rotationsFromSlime(root); Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField)); return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)), applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)), deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)), DeploymentActivity.create(optionalInstant(deploymentObject.field(lastQueriedField)), optionalInstant(deploymentObject.field(lastWrittenField)), optionalDouble(deploymentObject.field(lastQueriesPerSecondField)), optionalDouble(deploymentObject.field(lastWritesPerSecondField)))); } private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) { Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ? 
Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) : Optional.empty(); return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(), object.field(deploymentMetricsWPSField).asDouble(), object.field(deploymentMetricsDocsField).asDouble(), object.field(deploymentMetricsQueryLatencyField).asDouble(), object.field(deploymentMetricsWriteLatencyField).asDouble(), instant, deploymentWarningsFrom(object.field(deploymentMetricsWarningsField))); } private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) { Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name), (int) value.asLong())); return Collections.unmodifiableMap(warnings); } private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) { if (!object.valid()) { return Collections.emptyMap(); } Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); object.traverse((ArrayTraverser) (idx, inspect) -> { HostName hostname = HostName.from(inspect.field("hostname").asString()); RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString()); rotationStatus.put(hostname, status); }); return Collections.unmodifiableMap(rotationStatus); } private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = 
object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private ClusterInfo clusterInfoFromSlime(Inspector inspector) { String flavor = inspector.field(clusterInfoFlavorField).asString(); int cost = (int)inspector.field(clusterInfoCostField).asLong(); String type = inspector.field(clusterInfoTypeField).asString(); double flavorCpu = inspector.field(clusterInfoCpuField).asDouble(); double flavorMem = inspector.field(clusterInfoMemField).asDouble(); double flavorDisk = inspector.field(clusterInfoDiskField).asDouble(); List<String> hostnames = new ArrayList<>(); inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString())); return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames); } private ZoneId zoneIdFromSlime(Inspector object) { return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString()); } private ApplicationVersion applicationVersionFromSlime(Inspector object) { if ( ! object.valid()) return ApplicationVersion.unknown; OptionalLong applicationBuildNumber = optionalLong(object.field(applicationBuildNumberField)); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); if ( ! sourceRevision.isPresent() || ! applicationBuildNumber.isPresent()) { return ApplicationVersion.unknown; } Optional<String> authorEmail = optionalString(object.field(authorEmailField)); Optional<Version> compileVersion = optionalString(object.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = optionalInstant(object.field(buildTimeField)); if ( ! 
authorEmail.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong()); if ( ! compileVersion.isPresent() || ! buildTime.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get()); return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(), compileVersion.get(), buildTime.get()); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { OptionalLong projectId = optionalLong(object.field(projectIdField)); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<IssueId> issueId = optionalString(object.field(issueIdField)).map(IssueId::from); boolean builtInternally = object.field(builtInternallyField).asBool(); return new DeploymentJobs(projectId, jobStatusList, issueId, builtInternally); } private Change changeFromSlime(Inspector object) { if ( ! 
object.valid()) return Change.empty(); Inspector versionFieldValue = object.field(versionField); Change change = Change.empty(); if (versionFieldValue.valid()) change = Change.of(Version.fromString(versionFieldValue.asString())); if (object.field(applicationBuildNumberField).valid()) change = change.with(applicationVersionFromSlime(object)); if (object.field(pinnedField).asBool()) change = change.withPin(); return change; } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add)); return jobStatusList; } private Optional<JobStatus> jobStatusFromSlime(Inspector object) { Optional<JobType> jobType = JobType.fromOptionalJobName(object.field(jobTypeField).asString()); if (! jobType.isPresent()) return Optional.empty(); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return Optional.of(new JobStatus(jobType.get(), jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField)), optionalLong(object.field(pausedUntilField)))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(), new Version(object.field(versionField).asString()), applicationVersionFromSlime(object.field(revisionField)), optionalString(object.field(sourceVersionField)).map(Version::fromString), Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime), object.field(reasonField).asString(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private List<RotationId> rotationListFromSlime(Inspector field) { final var rotations = new ArrayList<RotationId>(); for (int i = 0; i < field.entries(); ++i) { var entry = field.entry(i); rotations.add(new RotationId(entry.asString())); } return rotations; } private Optional<RotationId> legacyRotationFromSlime(Inspector field) { return field.valid() ? optionalString(field).map(RotationId::new) : Optional.empty(); } private OptionalLong optionalLong(Inspector field) { return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty(); } private OptionalInt optionalInteger(Inspector field) { return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty(); } private OptionalDouble optionalDouble(Inspector field) { return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } private Optional<Instant> optionalInstant(Inspector field) { OptionalLong value = optionalLong(field); return value.isPresent() ? Optional.of(Instant.ofEpochMilli(value.getAsLong())) : Optional.empty(); } }
class ApplicationSerializer { private final String idField = "id"; private final String createdAtField = "createdAt"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String pinnedField = "pinned"; private final String outstandingChangeField = "outstandingChangeField"; private final String ownershipIssueIdField = "ownershipIssueId"; private final String ownerField = "confirmedOwner"; private final String majorVersionField = "majorVersion"; private final String writeQualityField = "writeQuality"; private final String queryQualityField = "queryQuality"; private final String pemDeployKeyField = "pemDeployKey"; private final String rotationsField = "endpoints"; private final String deprecatedRotationField = "rotation"; private final String rotationStatusField = "rotationStatus"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationBuildNumberField = "applicationBuildNumber"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String authorEmailField = "authorEmailField"; private final String compileVersionField = "compileVersion"; private final String buildTimeField = "buildTime"; private final String lastQueriedField = "lastQueried"; private final String lastWrittenField = "lastWritten"; private final String lastQueriesPerSecondField = "lastQueriesPerSecond"; private 
final String lastWritesPerSecondField = "lastWritesPerSecond"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String issueIdField = "jiraIssueId"; private final String builtInternallyField = "builtInternally"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String pausedUntilField = "pausedUntil"; private final String jobRunIdField = "id"; private final String versionField = "version"; private final String revisionField = "revision"; private final String sourceVersionField = "sourceVersion"; private final String sourceApplicationField = "sourceRevision"; private final String reasonField = "reason"; private final String atField = "at"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField = "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = "diskbusy"; private final String deploymentMetricsField = "metrics"; private final String deploymentMetricsQPSField = "queriesPerSecond"; private final String deploymentMetricsWPSField = "writesPerSecond"; private final String deploymentMetricsDocsField = "documentCount"; private 
final String deploymentMetricsQueryLatencyField = "queryLatencyMillis"; private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis"; private final String deploymentMetricsUpdateTime = "lastUpdated"; private final String deploymentMetricsWarningsField = "warnings"; public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setLong(createdAtField, application.createdAt().toEpochMilli()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.change(), root, deployingField); toSlime(application.outstandingChange(), root, outstandingChangeField); application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value())); application.owner().ifPresent(owner -> root.setString(ownerField, owner.username())); application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion)); root.setDouble(queryQualityField, application.metrics().queryServiceQuality()); root.setDouble(writeQualityField, application.metrics().writeServiceQuality()); application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey)); Cursor rotations = root.setArray(rotationsField); application.rotations().forEach(rotation -> rotations.addString(rotation.asString())); toSlime(application.rotationStatus(), root.setArray(rotationStatusField)); return slime; } private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, 
Cursor object) { zoneIdToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); deploymentMetricsToSlime(deployment.metrics(), object); deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value)); } private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) { Cursor root = object.setObject(deploymentMetricsField); root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond()); root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond()); root.setDouble(deploymentMetricsDocsField, metrics.documentCount()); root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis()); root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli())); if (!metrics.warnings().isEmpty()) { Cursor warningsObject = root.setObject(deploymentMetricsWarningsField); metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count)); } } private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { 
toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneIdToSlime(ZoneId zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if (applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent()) { object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong()); toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField)); applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString())); 
applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli())); } } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.issueId().ifPresent(jiraIssueId -> cursor.setString(issueIdField, jiraIssueId.value())); cursor.setBool(builtInternallyField, deploymentJobs.deployedInternally()); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().jobName()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField)); jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField)); jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField)); jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, object, firstFailingField)); jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until)); } private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) { Cursor object = parent.setObject(jobRunObjectName); object.setLong(jobRunIdField, jobRun.id()); object.setString(versionField, jobRun.platform().toString()); toSlime(jobRun.application(), object.setObject(revisionField)); jobRun.sourcePlatform().ifPresent(version -> 
object.setString(sourceVersionField, version.toString())); jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField))); object.setString(reasonField, jobRun.reason()); object.setLong(atField, jobRun.at().toEpochMilli()); } private void toSlime(Change deploying, Cursor parentObject, String fieldName) { if (deploying.isEmpty()) return; Cursor object = parentObject.setObject(fieldName); if (deploying.platform().isPresent()) object.setString(versionField, deploying.platform().get().toString()); if (deploying.application().isPresent()) toSlime(deploying.application().get(), object); if (deploying.isPinned()) object.setBool(pinnedField, true); } private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) { rotationStatus.forEach((hostname, status) -> { Cursor object = array.addObject(); object.setString("hostname", hostname.value()); object.setString("status", status.name()); }); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false); ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField)); Change deploying = changeFromSlime(root.field(deployingField)); Change outstandingChange = changeFromSlime(root.field(outstandingChangeField)); Optional<IssueId> ownershipIssueId = optionalString(root.field(ownershipIssueIdField)).map(IssueId::from); Optional<User> owner = optionalString(root.field(ownerField)).map(User::from); OptionalInt majorVersion = 
optionalInteger(root.field(majorVersionField)); ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(), root.field(writeQualityField).asDouble()); Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField)); List<RotationId> rotations = rotationsFromSlime(root); Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField)); return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)), applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)), deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)), DeploymentActivity.create(optionalInstant(deploymentObject.field(lastQueriedField)), optionalInstant(deploymentObject.field(lastWrittenField)), optionalDouble(deploymentObject.field(lastQueriesPerSecondField)), optionalDouble(deploymentObject.field(lastWritesPerSecondField)))); } private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) { Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ? 
Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) : Optional.empty(); return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(), object.field(deploymentMetricsWPSField).asDouble(), object.field(deploymentMetricsDocsField).asDouble(), object.field(deploymentMetricsQueryLatencyField).asDouble(), object.field(deploymentMetricsWriteLatencyField).asDouble(), instant, deploymentWarningsFrom(object.field(deploymentMetricsWarningsField))); } private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) { Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name), (int) value.asLong())); return Collections.unmodifiableMap(warnings); } private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) { if (!object.valid()) { return Collections.emptyMap(); } Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); object.traverse((ArrayTraverser) (idx, inspect) -> { HostName hostname = HostName.from(inspect.field("hostname").asString()); RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString()); rotationStatus.put(hostname, status); }); return Collections.unmodifiableMap(rotationStatus); } private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = 
object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private ClusterInfo clusterInfoFromSlime(Inspector inspector) { String flavor = inspector.field(clusterInfoFlavorField).asString(); int cost = (int)inspector.field(clusterInfoCostField).asLong(); String type = inspector.field(clusterInfoTypeField).asString(); double flavorCpu = inspector.field(clusterInfoCpuField).asDouble(); double flavorMem = inspector.field(clusterInfoMemField).asDouble(); double flavorDisk = inspector.field(clusterInfoDiskField).asDouble(); List<String> hostnames = new ArrayList<>(); inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString())); return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames); } private ZoneId zoneIdFromSlime(Inspector object) { return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString()); } private ApplicationVersion applicationVersionFromSlime(Inspector object) { if ( ! object.valid()) return ApplicationVersion.unknown; OptionalLong applicationBuildNumber = optionalLong(object.field(applicationBuildNumberField)); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); if ( ! sourceRevision.isPresent() || ! applicationBuildNumber.isPresent()) { return ApplicationVersion.unknown; } Optional<String> authorEmail = optionalString(object.field(authorEmailField)); Optional<Version> compileVersion = optionalString(object.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = optionalInstant(object.field(buildTimeField)); if ( ! 
authorEmail.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong()); if ( ! compileVersion.isPresent() || ! buildTime.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get()); return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(), compileVersion.get(), buildTime.get()); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { OptionalLong projectId = optionalLong(object.field(projectIdField)); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<IssueId> issueId = optionalString(object.field(issueIdField)).map(IssueId::from); boolean builtInternally = object.field(builtInternallyField).asBool(); return new DeploymentJobs(projectId, jobStatusList, issueId, builtInternally); } private Change changeFromSlime(Inspector object) { if ( ! 
object.valid()) return Change.empty(); Inspector versionFieldValue = object.field(versionField); Change change = Change.empty(); if (versionFieldValue.valid()) change = Change.of(Version.fromString(versionFieldValue.asString())); if (object.field(applicationBuildNumberField).valid()) change = change.with(applicationVersionFromSlime(object)); if (object.field(pinnedField).asBool()) change = change.withPin(); return change; } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add)); return jobStatusList; } private Optional<JobStatus> jobStatusFromSlime(Inspector object) { Optional<JobType> jobType = JobType.fromOptionalJobName(object.field(jobTypeField).asString()); if (! jobType.isPresent()) return Optional.empty(); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return Optional.of(new JobStatus(jobType.get(), jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField)), optionalLong(object.field(pausedUntilField)))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(), new Version(object.field(versionField).asString()), applicationVersionFromSlime(object.field(revisionField)), optionalString(object.field(sourceVersionField)).map(Version::fromString), Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime), object.field(reasonField).asString(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private List<RotationId> rotationListFromSlime(Inspector field) { final var rotations = new ArrayList<RotationId>(); field.traverse((ArrayTraverser) (idx, inspector) -> { final var rotation = new RotationId(inspector.asString()); rotations.add(rotation); }); return rotations; } private Optional<RotationId> legacyRotationFromSlime(Inspector field) { return field.valid() ? optionalString(field).map(RotationId::new) : Optional.empty(); } private OptionalLong optionalLong(Inspector field) { return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty(); } private OptionalInt optionalInteger(Inspector field) { return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty(); } private OptionalDouble optionalDouble(Inspector field) { return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } private Optional<Instant> optionalInstant(Inspector field) { OptionalLong value = optionalLong(field); return value.isPresent() ? Optional.of(Instant.ofEpochMilli(value.getAsLong())) : Optional.empty(); } }
no idea why this particular zone requires 4G RAM, sounds wrong - should not be needed - you can leave be and we can remove once we know why we need this - @hmusum ?
private NodeResources defaultNodeResources(ClusterSpec.Type clusterType) { if (zone.system() == SystemName.PublicCd && clusterType == ClusterSpec.Type.admin && zone.environment() != Environment.prod) return new NodeResources(1, 3, 50); if (zone.system() == SystemName.cd && zone.environment() == Environment.dev && zone.region().value().equals("cd-us-west-1")) return new NodeResources(1, 4, 50); if (zone.system() == SystemName.cd && zone.environment() == Environment.test || zone.environment() == Environment.staging) return clusterType == ClusterSpec.Type.admin ? new NodeResources(1, 3, 50) : new NodeResources(4, 4, 50); return new NodeResources(2, 8, 50); }
return new NodeResources(1, 4, 50);
private NodeResources defaultNodeResources(ClusterSpec.Type clusterType) { if (clusterType == ClusterSpec.Type.admin) return new NodeResources(0.5, 3, 50); if (zone.system() == SystemName.cd && zone.environment().isTest()) new NodeResources(4, 4, 50); return new NodeResources(2, 8, 50); }
class CapacityPolicies { private final Zone zone; private final NodeFlavors flavors; public CapacityPolicies(Zone zone, NodeFlavors flavors) { this.zone = zone; this.flavors = flavors; } public int decideSize(Capacity requestedCapacity, ClusterSpec.Type clusterType) { int requestedNodes = ensureRedundancy(requestedCapacity.nodeCount(), clusterType, requestedCapacity.canFail()); if (requestedCapacity.isRequired()) return requestedNodes; switch(zone.environment()) { case dev : case test : return 1; case perf : return Math.min(requestedCapacity.nodeCount(), 3); case staging: return requestedNodes <= 1 ? requestedNodes : Math.max(2, requestedNodes / 10); case prod : return requestedNodes; default : throw new IllegalArgumentException("Unsupported environment " + zone.environment()); } } public NodeResources decideNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { NodeResources resources = specifiedOrDefaultNodeResources(requestedResources, cluster); if (resources.allocateByLegacyName()) return resources; if (zone.system() == SystemName.cd || zone.environment() == Environment.dev || zone.environment() == Environment.test) resources = resources.withDiskSpeed(NodeResources.DiskSpeed.any); if (zone.environment() == Environment.dev) resources = resources.withVcpu(0.1); return resources; } private NodeResources specifiedOrDefaultNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { if (requestedResources.isPresent() && ! requestedResources.get().allocateByLegacyName()) return requestedResources.get(); if (requestedResources.isEmpty()) return defaultNodeResources(cluster.type()); if (zone.system() == SystemName.cd) return flavors.exists(requestedResources.get().legacyName().get()) ? 
requestedResources.get() : defaultNodeResources(cluster.type()); else { switch (zone.environment()) { case dev: case test: case staging: return defaultNodeResources(cluster.type()); default: flavors.getFlavorOrThrow(requestedResources.get().legacyName().get()); return requestedResources.get(); } } } /** * Whether or not the nodes requested can share physical host with other applications. * A security feature which only makes sense for prod. */ public boolean decideExclusivity(boolean requestedExclusivity) { return requestedExclusivity && zone.environment() == Environment.prod; } /** * Throw if the node count is 1 for container and content clusters and we're in a production zone * * @return the argument node count * @throws IllegalArgumentException if only one node is requested and we can fail */ private int ensureRedundancy(int nodeCount, ClusterSpec.Type clusterType, boolean canFail) { if (canFail && nodeCount == 1 && Arrays.asList(ClusterSpec.Type.container, ClusterSpec.Type.content).contains(clusterType) && zone.environment().isProduction()) throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy"); return nodeCount; } }
class CapacityPolicies { private final Zone zone; private final NodeFlavors flavors; public CapacityPolicies(Zone zone, NodeFlavors flavors) { this.zone = zone; this.flavors = flavors; } public int decideSize(Capacity requestedCapacity, ClusterSpec.Type clusterType) { int requestedNodes = ensureRedundancy(requestedCapacity.nodeCount(), clusterType, requestedCapacity.canFail()); if (requestedCapacity.isRequired()) return requestedNodes; switch(zone.environment()) { case dev : case test : return 1; case perf : return Math.min(requestedCapacity.nodeCount(), 3); case staging: return requestedNodes <= 1 ? requestedNodes : Math.max(2, requestedNodes / 10); case prod : return requestedNodes; default : throw new IllegalArgumentException("Unsupported environment " + zone.environment()); } } public NodeResources decideNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { NodeResources resources = specifiedOrDefaultNodeResources(requestedResources, cluster); if (resources.allocateByLegacyName()) return resources; if (zone.system() == SystemName.cd || zone.environment() == Environment.dev || zone.environment() == Environment.test) resources = resources.withDiskSpeed(NodeResources.DiskSpeed.any); if (zone.environment() == Environment.dev) resources = resources.withVcpu(0.1); return resources; } private NodeResources specifiedOrDefaultNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { if (requestedResources.isPresent() && ! requestedResources.get().allocateByLegacyName()) return requestedResources.get(); if (requestedResources.isEmpty()) return defaultNodeResources(cluster.type()); if (zone.system() == SystemName.cd) return flavors.exists(requestedResources.get().legacyName().get()) ? 
requestedResources.get() : defaultNodeResources(cluster.type()); else { switch (zone.environment()) { case dev: case test: case staging: return defaultNodeResources(cluster.type()); default: flavors.getFlavorOrThrow(requestedResources.get().legacyName().get()); return requestedResources.get(); } } } /** * Whether or not the nodes requested can share physical host with other applications. * A security feature which only makes sense for prod. */ public boolean decideExclusivity(boolean requestedExclusivity) { return requestedExclusivity && zone.environment() == Environment.prod; } /** * Throw if the node count is 1 for container and content clusters and we're in a production zone * * @return the argument node count * @throws IllegalArgumentException if only one node is requested and we can fail */ private int ensureRedundancy(int nodeCount, ClusterSpec.Type clusterType, boolean canFail) { if (canFail && nodeCount == 1 && Arrays.asList(ClusterSpec.Type.container, ClusterSpec.Type.content).contains(clusterType) && zone.environment().isProduction()) throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy"); return nodeCount; } }
No need for this, just remove it
private NodeResources defaultNodeResources(ClusterSpec.Type clusterType) { if (zone.system() == SystemName.PublicCd && clusterType == ClusterSpec.Type.admin && zone.environment() != Environment.prod) return new NodeResources(1, 3, 50); if (zone.system() == SystemName.cd && zone.environment() == Environment.dev && zone.region().value().equals("cd-us-west-1")) return new NodeResources(1, 4, 50); if (zone.system() == SystemName.cd && zone.environment() == Environment.test || zone.environment() == Environment.staging) return clusterType == ClusterSpec.Type.admin ? new NodeResources(1, 3, 50) : new NodeResources(4, 4, 50); return new NodeResources(2, 8, 50); }
return new NodeResources(1, 4, 50);
private NodeResources defaultNodeResources(ClusterSpec.Type clusterType) { if (clusterType == ClusterSpec.Type.admin) return new NodeResources(0.5, 3, 50); if (zone.system() == SystemName.cd && zone.environment().isTest()) new NodeResources(4, 4, 50); return new NodeResources(2, 8, 50); }
class CapacityPolicies { private final Zone zone; private final NodeFlavors flavors; public CapacityPolicies(Zone zone, NodeFlavors flavors) { this.zone = zone; this.flavors = flavors; } public int decideSize(Capacity requestedCapacity, ClusterSpec.Type clusterType) { int requestedNodes = ensureRedundancy(requestedCapacity.nodeCount(), clusterType, requestedCapacity.canFail()); if (requestedCapacity.isRequired()) return requestedNodes; switch(zone.environment()) { case dev : case test : return 1; case perf : return Math.min(requestedCapacity.nodeCount(), 3); case staging: return requestedNodes <= 1 ? requestedNodes : Math.max(2, requestedNodes / 10); case prod : return requestedNodes; default : throw new IllegalArgumentException("Unsupported environment " + zone.environment()); } } public NodeResources decideNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { NodeResources resources = specifiedOrDefaultNodeResources(requestedResources, cluster); if (resources.allocateByLegacyName()) return resources; if (zone.system() == SystemName.cd || zone.environment() == Environment.dev || zone.environment() == Environment.test) resources = resources.withDiskSpeed(NodeResources.DiskSpeed.any); if (zone.environment() == Environment.dev) resources = resources.withVcpu(0.1); return resources; } private NodeResources specifiedOrDefaultNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { if (requestedResources.isPresent() && ! requestedResources.get().allocateByLegacyName()) return requestedResources.get(); if (requestedResources.isEmpty()) return defaultNodeResources(cluster.type()); if (zone.system() == SystemName.cd) return flavors.exists(requestedResources.get().legacyName().get()) ? 
requestedResources.get() : defaultNodeResources(cluster.type()); else { switch (zone.environment()) { case dev: case test: case staging: return defaultNodeResources(cluster.type()); default: flavors.getFlavorOrThrow(requestedResources.get().legacyName().get()); return requestedResources.get(); } } } /** * Whether or not the nodes requested can share physical host with other applications. * A security feature which only makes sense for prod. */ public boolean decideExclusivity(boolean requestedExclusivity) { return requestedExclusivity && zone.environment() == Environment.prod; } /** * Throw if the node count is 1 for container and content clusters and we're in a production zone * * @return the argument node count * @throws IllegalArgumentException if only one node is requested and we can fail */ private int ensureRedundancy(int nodeCount, ClusterSpec.Type clusterType, boolean canFail) { if (canFail && nodeCount == 1 && Arrays.asList(ClusterSpec.Type.container, ClusterSpec.Type.content).contains(clusterType) && zone.environment().isProduction()) throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy"); return nodeCount; } }
class CapacityPolicies { private final Zone zone; private final NodeFlavors flavors; public CapacityPolicies(Zone zone, NodeFlavors flavors) { this.zone = zone; this.flavors = flavors; } public int decideSize(Capacity requestedCapacity, ClusterSpec.Type clusterType) { int requestedNodes = ensureRedundancy(requestedCapacity.nodeCount(), clusterType, requestedCapacity.canFail()); if (requestedCapacity.isRequired()) return requestedNodes; switch(zone.environment()) { case dev : case test : return 1; case perf : return Math.min(requestedCapacity.nodeCount(), 3); case staging: return requestedNodes <= 1 ? requestedNodes : Math.max(2, requestedNodes / 10); case prod : return requestedNodes; default : throw new IllegalArgumentException("Unsupported environment " + zone.environment()); } } public NodeResources decideNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { NodeResources resources = specifiedOrDefaultNodeResources(requestedResources, cluster); if (resources.allocateByLegacyName()) return resources; if (zone.system() == SystemName.cd || zone.environment() == Environment.dev || zone.environment() == Environment.test) resources = resources.withDiskSpeed(NodeResources.DiskSpeed.any); if (zone.environment() == Environment.dev) resources = resources.withVcpu(0.1); return resources; } private NodeResources specifiedOrDefaultNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { if (requestedResources.isPresent() && ! requestedResources.get().allocateByLegacyName()) return requestedResources.get(); if (requestedResources.isEmpty()) return defaultNodeResources(cluster.type()); if (zone.system() == SystemName.cd) return flavors.exists(requestedResources.get().legacyName().get()) ? 
requestedResources.get() : defaultNodeResources(cluster.type()); else { switch (zone.environment()) { case dev: case test: case staging: return defaultNodeResources(cluster.type()); default: flavors.getFlavorOrThrow(requestedResources.get().legacyName().get()); return requestedResources.get(); } } } /** * Whether or not the nodes requested can share physical host with other applications. * A security feature which only makes sense for prod. */ public boolean decideExclusivity(boolean requestedExclusivity) { return requestedExclusivity && zone.environment() == Environment.prod; } /** * Throw if the node count is 1 for container and content clusters and we're in a production zone * * @return the argument node count * @throws IllegalArgumentException if only one node is requested and we can fail */ private int ensureRedundancy(int nodeCount, ClusterSpec.Type clusterType, boolean canFail) { if (canFail && nodeCount == 1 && Arrays.asList(ClusterSpec.Type.container, ClusterSpec.Type.content).contains(clusterType) && zone.environment().isProduction()) throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy"); return nodeCount; } }
Done, thanks
private NodeResources defaultNodeResources(ClusterSpec.Type clusterType) { if (zone.system() == SystemName.PublicCd && clusterType == ClusterSpec.Type.admin && zone.environment() != Environment.prod) return new NodeResources(1, 3, 50); if (zone.system() == SystemName.cd && zone.environment() == Environment.dev && zone.region().value().equals("cd-us-west-1")) return new NodeResources(1, 4, 50); if (zone.system() == SystemName.cd && zone.environment() == Environment.test || zone.environment() == Environment.staging) return clusterType == ClusterSpec.Type.admin ? new NodeResources(1, 3, 50) : new NodeResources(4, 4, 50); return new NodeResources(2, 8, 50); }
return new NodeResources(1, 4, 50);
private NodeResources defaultNodeResources(ClusterSpec.Type clusterType) { if (clusterType == ClusterSpec.Type.admin) return new NodeResources(0.5, 3, 50); if (zone.system() == SystemName.cd && zone.environment().isTest()) new NodeResources(4, 4, 50); return new NodeResources(2, 8, 50); }
class CapacityPolicies { private final Zone zone; private final NodeFlavors flavors; public CapacityPolicies(Zone zone, NodeFlavors flavors) { this.zone = zone; this.flavors = flavors; } public int decideSize(Capacity requestedCapacity, ClusterSpec.Type clusterType) { int requestedNodes = ensureRedundancy(requestedCapacity.nodeCount(), clusterType, requestedCapacity.canFail()); if (requestedCapacity.isRequired()) return requestedNodes; switch(zone.environment()) { case dev : case test : return 1; case perf : return Math.min(requestedCapacity.nodeCount(), 3); case staging: return requestedNodes <= 1 ? requestedNodes : Math.max(2, requestedNodes / 10); case prod : return requestedNodes; default : throw new IllegalArgumentException("Unsupported environment " + zone.environment()); } } public NodeResources decideNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { NodeResources resources = specifiedOrDefaultNodeResources(requestedResources, cluster); if (resources.allocateByLegacyName()) return resources; if (zone.system() == SystemName.cd || zone.environment() == Environment.dev || zone.environment() == Environment.test) resources = resources.withDiskSpeed(NodeResources.DiskSpeed.any); if (zone.environment() == Environment.dev) resources = resources.withVcpu(0.1); return resources; } private NodeResources specifiedOrDefaultNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { if (requestedResources.isPresent() && ! requestedResources.get().allocateByLegacyName()) return requestedResources.get(); if (requestedResources.isEmpty()) return defaultNodeResources(cluster.type()); if (zone.system() == SystemName.cd) return flavors.exists(requestedResources.get().legacyName().get()) ? 
requestedResources.get() : defaultNodeResources(cluster.type()); else { switch (zone.environment()) { case dev: case test: case staging: return defaultNodeResources(cluster.type()); default: flavors.getFlavorOrThrow(requestedResources.get().legacyName().get()); return requestedResources.get(); } } } /** * Whether or not the nodes requested can share physical host with other applications. * A security feature which only makes sense for prod. */ public boolean decideExclusivity(boolean requestedExclusivity) { return requestedExclusivity && zone.environment() == Environment.prod; } /** * Throw if the node count is 1 for container and content clusters and we're in a production zone * * @return the argument node count * @throws IllegalArgumentException if only one node is requested and we can fail */ private int ensureRedundancy(int nodeCount, ClusterSpec.Type clusterType, boolean canFail) { if (canFail && nodeCount == 1 && Arrays.asList(ClusterSpec.Type.container, ClusterSpec.Type.content).contains(clusterType) && zone.environment().isProduction()) throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy"); return nodeCount; } }
class CapacityPolicies { private final Zone zone; private final NodeFlavors flavors; public CapacityPolicies(Zone zone, NodeFlavors flavors) { this.zone = zone; this.flavors = flavors; } public int decideSize(Capacity requestedCapacity, ClusterSpec.Type clusterType) { int requestedNodes = ensureRedundancy(requestedCapacity.nodeCount(), clusterType, requestedCapacity.canFail()); if (requestedCapacity.isRequired()) return requestedNodes; switch(zone.environment()) { case dev : case test : return 1; case perf : return Math.min(requestedCapacity.nodeCount(), 3); case staging: return requestedNodes <= 1 ? requestedNodes : Math.max(2, requestedNodes / 10); case prod : return requestedNodes; default : throw new IllegalArgumentException("Unsupported environment " + zone.environment()); } } public NodeResources decideNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { NodeResources resources = specifiedOrDefaultNodeResources(requestedResources, cluster); if (resources.allocateByLegacyName()) return resources; if (zone.system() == SystemName.cd || zone.environment() == Environment.dev || zone.environment() == Environment.test) resources = resources.withDiskSpeed(NodeResources.DiskSpeed.any); if (zone.environment() == Environment.dev) resources = resources.withVcpu(0.1); return resources; } private NodeResources specifiedOrDefaultNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { if (requestedResources.isPresent() && ! requestedResources.get().allocateByLegacyName()) return requestedResources.get(); if (requestedResources.isEmpty()) return defaultNodeResources(cluster.type()); if (zone.system() == SystemName.cd) return flavors.exists(requestedResources.get().legacyName().get()) ? 
requestedResources.get() : defaultNodeResources(cluster.type()); else { switch (zone.environment()) { case dev: case test: case staging: return defaultNodeResources(cluster.type()); default: flavors.getFlavorOrThrow(requestedResources.get().legacyName().get()); return requestedResources.get(); } } } /** * Whether or not the nodes requested can share physical host with other applications. * A security feature which only makes sense for prod. */ public boolean decideExclusivity(boolean requestedExclusivity) { return requestedExclusivity && zone.environment() == Environment.prod; } /** * Throw if the node count is 1 for container and content clusters and we're in a production zone * * @return the argument node count * @throws IllegalArgumentException if only one node is requested and we can fail */ private int ensureRedundancy(int nodeCount, ClusterSpec.Type clusterType, boolean canFail) { if (canFail && nodeCount == 1 && Arrays.asList(ClusterSpec.Type.container, ClusterSpec.Type.content).contains(clusterType) && zone.environment().isProduction()) throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy"); return nodeCount; } }
👌
public boolean parse(String value) { return true; }
return true;
public boolean parse(String value) { try { Short parsed = Short.parseShort(value); if (parsed > 127 || parsed < -128) { return false; } } catch (NumberFormatException e) { return false; } return true; }
class TinyIntParser extends ColumnParser { @Override }
class TinyIntParser extends ColumnParser { @Override }
`zone.environment() == Environment.test || zone.environment() == Environment.staging` == `zone.environment().isTest()`
private NodeResources defaultNodeResources(ClusterSpec.Type clusterType) { if (zone.system() != SystemName.cd && clusterType == ClusterSpec.Type.admin) return new NodeResources(0.5, 3, 50); if (zone.system() == SystemName.PublicCd && clusterType == ClusterSpec.Type.admin && zone.environment() != Environment.prod) return new NodeResources(1, 3, 50); if (zone.system() == SystemName.cd && zone.environment() == Environment.test || zone.environment() == Environment.staging) return clusterType == ClusterSpec.Type.admin ? new NodeResources(1, 3, 50) : new NodeResources(4, 4, 50); return new NodeResources(2, 8, 50); }
if (zone.system() == SystemName.cd && zone.environment() == Environment.test || zone.environment() == Environment.staging)
private NodeResources defaultNodeResources(ClusterSpec.Type clusterType) { if (clusterType == ClusterSpec.Type.admin) return new NodeResources(0.5, 3, 50); if (zone.system() == SystemName.cd && zone.environment().isTest()) new NodeResources(4, 4, 50); return new NodeResources(2, 8, 50); }
class CapacityPolicies { private final Zone zone; private final NodeFlavors flavors; public CapacityPolicies(Zone zone, NodeFlavors flavors) { this.zone = zone; this.flavors = flavors; } public int decideSize(Capacity requestedCapacity, ClusterSpec.Type clusterType) { int requestedNodes = ensureRedundancy(requestedCapacity.nodeCount(), clusterType, requestedCapacity.canFail()); if (requestedCapacity.isRequired()) return requestedNodes; switch(zone.environment()) { case dev : case test : return 1; case perf : return Math.min(requestedCapacity.nodeCount(), 3); case staging: return requestedNodes <= 1 ? requestedNodes : Math.max(2, requestedNodes / 10); case prod : return requestedNodes; default : throw new IllegalArgumentException("Unsupported environment " + zone.environment()); } } public NodeResources decideNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { NodeResources resources = specifiedOrDefaultNodeResources(requestedResources, cluster); if (resources.allocateByLegacyName()) return resources; if (zone.system() == SystemName.cd || zone.environment() == Environment.dev || zone.environment() == Environment.test) resources = resources.withDiskSpeed(NodeResources.DiskSpeed.any); if (zone.environment() == Environment.dev) resources = resources.withVcpu(0.1); return resources; } private NodeResources specifiedOrDefaultNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { if (requestedResources.isPresent() && ! requestedResources.get().allocateByLegacyName()) return requestedResources.get(); if (requestedResources.isEmpty()) return defaultNodeResources(cluster.type()); if (zone.system() == SystemName.cd) return flavors.exists(requestedResources.get().legacyName().get()) ? 
requestedResources.get() : defaultNodeResources(cluster.type()); else { switch (zone.environment()) { case dev: case test: case staging: return defaultNodeResources(cluster.type()); default: flavors.getFlavorOrThrow(requestedResources.get().legacyName().get()); return requestedResources.get(); } } } /** * Whether or not the nodes requested can share physical host with other applications. * A security feature which only makes sense for prod. */ public boolean decideExclusivity(boolean requestedExclusivity) { return requestedExclusivity && zone.environment() == Environment.prod; } /** * Throw if the node count is 1 for container and content clusters and we're in a production zone * * @return the argument node count * @throws IllegalArgumentException if only one node is requested and we can fail */ private int ensureRedundancy(int nodeCount, ClusterSpec.Type clusterType, boolean canFail) { if (canFail && nodeCount == 1 && Arrays.asList(ClusterSpec.Type.container, ClusterSpec.Type.content).contains(clusterType) && zone.environment().isProduction()) throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy"); return nodeCount; } }
class CapacityPolicies { private final Zone zone; private final NodeFlavors flavors; public CapacityPolicies(Zone zone, NodeFlavors flavors) { this.zone = zone; this.flavors = flavors; } public int decideSize(Capacity requestedCapacity, ClusterSpec.Type clusterType) { int requestedNodes = ensureRedundancy(requestedCapacity.nodeCount(), clusterType, requestedCapacity.canFail()); if (requestedCapacity.isRequired()) return requestedNodes; switch(zone.environment()) { case dev : case test : return 1; case perf : return Math.min(requestedCapacity.nodeCount(), 3); case staging: return requestedNodes <= 1 ? requestedNodes : Math.max(2, requestedNodes / 10); case prod : return requestedNodes; default : throw new IllegalArgumentException("Unsupported environment " + zone.environment()); } } public NodeResources decideNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { NodeResources resources = specifiedOrDefaultNodeResources(requestedResources, cluster); if (resources.allocateByLegacyName()) return resources; if (zone.system() == SystemName.cd || zone.environment() == Environment.dev || zone.environment() == Environment.test) resources = resources.withDiskSpeed(NodeResources.DiskSpeed.any); if (zone.environment() == Environment.dev) resources = resources.withVcpu(0.1); return resources; } private NodeResources specifiedOrDefaultNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { if (requestedResources.isPresent() && ! requestedResources.get().allocateByLegacyName()) return requestedResources.get(); if (requestedResources.isEmpty()) return defaultNodeResources(cluster.type()); if (zone.system() == SystemName.cd) return flavors.exists(requestedResources.get().legacyName().get()) ? 
requestedResources.get() : defaultNodeResources(cluster.type()); else { switch (zone.environment()) { case dev: case test: case staging: return defaultNodeResources(cluster.type()); default: flavors.getFlavorOrThrow(requestedResources.get().legacyName().get()); return requestedResources.get(); } } } /** * Whether or not the nodes requested can share physical host with other applications. * A security feature which only makes sense for prod. */ public boolean decideExclusivity(boolean requestedExclusivity) { return requestedExclusivity && zone.environment() == Environment.prod; } /** * Throw if the node count is 1 for container and content clusters and we're in a production zone * * @return the argument node count * @throws IllegalArgumentException if only one node is requested and we can fail */ private int ensureRedundancy(int nodeCount, ClusterSpec.Type clusterType, boolean canFail) { if (canFail && nodeCount == 1 && Arrays.asList(ClusterSpec.Type.container, ClusterSpec.Type.content).contains(clusterType) && zone.environment().isProduction()) throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy"); return nodeCount; } }
We would need more Docker hosts, since we need more memory for the non-admin case. The admin clause can be removed.
private NodeResources defaultNodeResources(ClusterSpec.Type clusterType) { if (zone.system() == SystemName.PublicCd && clusterType == ClusterSpec.Type.admin && zone.environment() != Environment.prod) return new NodeResources(1, 3, 50); if (zone.system() == SystemName.cd && zone.environment() == Environment.dev && zone.region().value().equals("cd-us-west-1")) return new NodeResources(1, 4, 50); if (zone.system() == SystemName.cd && zone.environment() == Environment.test || zone.environment() == Environment.staging) return clusterType == ClusterSpec.Type.admin ? new NodeResources(1, 3, 50) : new NodeResources(4, 4, 50); return new NodeResources(2, 8, 50); }
return new NodeResources(1, 4, 50);
private NodeResources defaultNodeResources(ClusterSpec.Type clusterType) { if (clusterType == ClusterSpec.Type.admin) return new NodeResources(0.5, 3, 50); if (zone.system() == SystemName.cd && zone.environment().isTest()) new NodeResources(4, 4, 50); return new NodeResources(2, 8, 50); }
class CapacityPolicies { private final Zone zone; private final NodeFlavors flavors; public CapacityPolicies(Zone zone, NodeFlavors flavors) { this.zone = zone; this.flavors = flavors; } public int decideSize(Capacity requestedCapacity, ClusterSpec.Type clusterType) { int requestedNodes = ensureRedundancy(requestedCapacity.nodeCount(), clusterType, requestedCapacity.canFail()); if (requestedCapacity.isRequired()) return requestedNodes; switch(zone.environment()) { case dev : case test : return 1; case perf : return Math.min(requestedCapacity.nodeCount(), 3); case staging: return requestedNodes <= 1 ? requestedNodes : Math.max(2, requestedNodes / 10); case prod : return requestedNodes; default : throw new IllegalArgumentException("Unsupported environment " + zone.environment()); } } public NodeResources decideNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { NodeResources resources = specifiedOrDefaultNodeResources(requestedResources, cluster); if (resources.allocateByLegacyName()) return resources; if (zone.system() == SystemName.cd || zone.environment() == Environment.dev || zone.environment() == Environment.test) resources = resources.withDiskSpeed(NodeResources.DiskSpeed.any); if (zone.environment() == Environment.dev) resources = resources.withVcpu(0.1); return resources; } private NodeResources specifiedOrDefaultNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { if (requestedResources.isPresent() && ! requestedResources.get().allocateByLegacyName()) return requestedResources.get(); if (requestedResources.isEmpty()) return defaultNodeResources(cluster.type()); if (zone.system() == SystemName.cd) return flavors.exists(requestedResources.get().legacyName().get()) ? 
requestedResources.get() : defaultNodeResources(cluster.type()); else { switch (zone.environment()) { case dev: case test: case staging: return defaultNodeResources(cluster.type()); default: flavors.getFlavorOrThrow(requestedResources.get().legacyName().get()); return requestedResources.get(); } } } /** * Whether or not the nodes requested can share physical host with other applications. * A security feature which only makes sense for prod. */ public boolean decideExclusivity(boolean requestedExclusivity) { return requestedExclusivity && zone.environment() == Environment.prod; } /** * Throw if the node count is 1 for container and content clusters and we're in a production zone * * @return the argument node count * @throws IllegalArgumentException if only one node is requested and we can fail */ private int ensureRedundancy(int nodeCount, ClusterSpec.Type clusterType, boolean canFail) { if (canFail && nodeCount == 1 && Arrays.asList(ClusterSpec.Type.container, ClusterSpec.Type.content).contains(clusterType) && zone.environment().isProduction()) throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy"); return nodeCount; } }
class CapacityPolicies { private final Zone zone; private final NodeFlavors flavors; public CapacityPolicies(Zone zone, NodeFlavors flavors) { this.zone = zone; this.flavors = flavors; } public int decideSize(Capacity requestedCapacity, ClusterSpec.Type clusterType) { int requestedNodes = ensureRedundancy(requestedCapacity.nodeCount(), clusterType, requestedCapacity.canFail()); if (requestedCapacity.isRequired()) return requestedNodes; switch(zone.environment()) { case dev : case test : return 1; case perf : return Math.min(requestedCapacity.nodeCount(), 3); case staging: return requestedNodes <= 1 ? requestedNodes : Math.max(2, requestedNodes / 10); case prod : return requestedNodes; default : throw new IllegalArgumentException("Unsupported environment " + zone.environment()); } } public NodeResources decideNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { NodeResources resources = specifiedOrDefaultNodeResources(requestedResources, cluster); if (resources.allocateByLegacyName()) return resources; if (zone.system() == SystemName.cd || zone.environment() == Environment.dev || zone.environment() == Environment.test) resources = resources.withDiskSpeed(NodeResources.DiskSpeed.any); if (zone.environment() == Environment.dev) resources = resources.withVcpu(0.1); return resources; } private NodeResources specifiedOrDefaultNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { if (requestedResources.isPresent() && ! requestedResources.get().allocateByLegacyName()) return requestedResources.get(); if (requestedResources.isEmpty()) return defaultNodeResources(cluster.type()); if (zone.system() == SystemName.cd) return flavors.exists(requestedResources.get().legacyName().get()) ? 
requestedResources.get() : defaultNodeResources(cluster.type()); else { switch (zone.environment()) { case dev: case test: case staging: return defaultNodeResources(cluster.type()); default: flavors.getFlavorOrThrow(requestedResources.get().legacyName().get()); return requestedResources.get(); } } } /** * Whether or not the nodes requested can share physical host with other applications. * A security feature which only makes sense for prod. */ public boolean decideExclusivity(boolean requestedExclusivity) { return requestedExclusivity && zone.environment() == Environment.prod; } /** * Throw if the node count is 1 for container and content clusters and we're in a production zone * * @return the argument node count * @throws IllegalArgumentException if only one node is requested and we can fail */ private int ensureRedundancy(int nodeCount, ClusterSpec.Type clusterType, boolean canFail) { if (canFail && nodeCount == 1 && Arrays.asList(ClusterSpec.Type.container, ClusterSpec.Type.content).contains(clusterType) && zone.environment().isProduction()) throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy"); return nodeCount; } }
This one can be removed as well
private NodeResources defaultNodeResources(ClusterSpec.Type clusterType) { if (zone.system() != SystemName.cd && clusterType == ClusterSpec.Type.admin) return new NodeResources(0.5, 3, 50); if (zone.system() == SystemName.PublicCd && clusterType == ClusterSpec.Type.admin && zone.environment() != Environment.prod) return new NodeResources(1, 3, 50); if (zone.system() == SystemName.cd && zone.environment() == Environment.test || zone.environment() == Environment.staging) return clusterType == ClusterSpec.Type.admin ? new NodeResources(1, 3, 50) : new NodeResources(4, 4, 50); return new NodeResources(2, 8, 50); }
if (zone.system() == SystemName.PublicCd && clusterType == ClusterSpec.Type.admin && zone.environment() != Environment.prod)
private NodeResources defaultNodeResources(ClusterSpec.Type clusterType) { if (clusterType == ClusterSpec.Type.admin) return new NodeResources(0.5, 3, 50); if (zone.system() == SystemName.cd && zone.environment().isTest()) new NodeResources(4, 4, 50); return new NodeResources(2, 8, 50); }
class CapacityPolicies { private final Zone zone; private final NodeFlavors flavors; public CapacityPolicies(Zone zone, NodeFlavors flavors) { this.zone = zone; this.flavors = flavors; } public int decideSize(Capacity requestedCapacity, ClusterSpec.Type clusterType) { int requestedNodes = ensureRedundancy(requestedCapacity.nodeCount(), clusterType, requestedCapacity.canFail()); if (requestedCapacity.isRequired()) return requestedNodes; switch(zone.environment()) { case dev : case test : return 1; case perf : return Math.min(requestedCapacity.nodeCount(), 3); case staging: return requestedNodes <= 1 ? requestedNodes : Math.max(2, requestedNodes / 10); case prod : return requestedNodes; default : throw new IllegalArgumentException("Unsupported environment " + zone.environment()); } } public NodeResources decideNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { NodeResources resources = specifiedOrDefaultNodeResources(requestedResources, cluster); if (resources.allocateByLegacyName()) return resources; if (zone.system() == SystemName.cd || zone.environment() == Environment.dev || zone.environment() == Environment.test) resources = resources.withDiskSpeed(NodeResources.DiskSpeed.any); if (zone.environment() == Environment.dev) resources = resources.withVcpu(0.1); return resources; } private NodeResources specifiedOrDefaultNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { if (requestedResources.isPresent() && ! requestedResources.get().allocateByLegacyName()) return requestedResources.get(); if (requestedResources.isEmpty()) return defaultNodeResources(cluster.type()); if (zone.system() == SystemName.cd) return flavors.exists(requestedResources.get().legacyName().get()) ? 
requestedResources.get() : defaultNodeResources(cluster.type()); else { switch (zone.environment()) { case dev: case test: case staging: return defaultNodeResources(cluster.type()); default: flavors.getFlavorOrThrow(requestedResources.get().legacyName().get()); return requestedResources.get(); } } } /** * Whether or not the nodes requested can share physical host with other applications. * A security feature which only makes sense for prod. */ public boolean decideExclusivity(boolean requestedExclusivity) { return requestedExclusivity && zone.environment() == Environment.prod; } /** * Throw if the node count is 1 for container and content clusters and we're in a production zone * * @return the argument node count * @throws IllegalArgumentException if only one node is requested and we can fail */ private int ensureRedundancy(int nodeCount, ClusterSpec.Type clusterType, boolean canFail) { if (canFail && nodeCount == 1 && Arrays.asList(ClusterSpec.Type.container, ClusterSpec.Type.content).contains(clusterType) && zone.environment().isProduction()) throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy"); return nodeCount; } }
class CapacityPolicies { private final Zone zone; private final NodeFlavors flavors; public CapacityPolicies(Zone zone, NodeFlavors flavors) { this.zone = zone; this.flavors = flavors; } public int decideSize(Capacity requestedCapacity, ClusterSpec.Type clusterType) { int requestedNodes = ensureRedundancy(requestedCapacity.nodeCount(), clusterType, requestedCapacity.canFail()); if (requestedCapacity.isRequired()) return requestedNodes; switch(zone.environment()) { case dev : case test : return 1; case perf : return Math.min(requestedCapacity.nodeCount(), 3); case staging: return requestedNodes <= 1 ? requestedNodes : Math.max(2, requestedNodes / 10); case prod : return requestedNodes; default : throw new IllegalArgumentException("Unsupported environment " + zone.environment()); } } public NodeResources decideNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { NodeResources resources = specifiedOrDefaultNodeResources(requestedResources, cluster); if (resources.allocateByLegacyName()) return resources; if (zone.system() == SystemName.cd || zone.environment() == Environment.dev || zone.environment() == Environment.test) resources = resources.withDiskSpeed(NodeResources.DiskSpeed.any); if (zone.environment() == Environment.dev) resources = resources.withVcpu(0.1); return resources; } private NodeResources specifiedOrDefaultNodeResources(Optional<NodeResources> requestedResources, ClusterSpec cluster) { if (requestedResources.isPresent() && ! requestedResources.get().allocateByLegacyName()) return requestedResources.get(); if (requestedResources.isEmpty()) return defaultNodeResources(cluster.type()); if (zone.system() == SystemName.cd) return flavors.exists(requestedResources.get().legacyName().get()) ? 
requestedResources.get() : defaultNodeResources(cluster.type()); else { switch (zone.environment()) { case dev: case test: case staging: return defaultNodeResources(cluster.type()); default: flavors.getFlavorOrThrow(requestedResources.get().legacyName().get()); return requestedResources.get(); } } } /** * Whether or not the nodes requested can share physical host with other applications. * A security feature which only makes sense for prod. */ public boolean decideExclusivity(boolean requestedExclusivity) { return requestedExclusivity && zone.environment() == Environment.prod; } /** * Throw if the node count is 1 for container and content clusters and we're in a production zone * * @return the argument node count * @throws IllegalArgumentException if only one node is requested and we can fail */ private int ensureRedundancy(int nodeCount, ClusterSpec.Type clusterType, boolean canFail) { if (canFail && nodeCount == 1 && Arrays.asList(ClusterSpec.Type.container, ClusterSpec.Type.content).contains(clusterType) && zone.environment().isProduction()) throw new IllegalArgumentException("Deployments to prod require at least 2 nodes per cluster for redundancy"); return nodeCount; } }
Fyi, the log() overloads taking a throwable will also print the stack trace to the log.
public String serialize() { ObjectMapper mapper = JacksonUtil.createObjectMapper(); try { return mapper.writeValueAsString(this); } catch (IOException e) { e.printStackTrace(); throw new RuntimeException("Could not render metrics. Check the log for details."); } }
e.printStackTrace();
public String serialize() { ObjectMapper mapper = JacksonUtil.createObjectMapper(); try { return mapper.writeValueAsString(this); } catch (IOException e) { log.log(Level.WARNING, "Got exception when rendering metrics:", e); throw new RuntimeException("Could not render metrics. Check the log for details."); } }
class GenericJsonModel { private static Logger log = Logger.getLogger(GenericJsonModel.class.getName()); @JsonProperty("node") public GenericNode node; @JsonProperty("services") public List<GenericService> services; }
class GenericJsonModel { private static Logger log = Logger.getLogger(GenericJsonModel.class.getName()); @JsonProperty("node") public GenericNode node; @JsonProperty("services") public List<GenericService> services; }
Nit: Camel case.
public void can_only_downgrade_with_force() { assertTrue(infrastructureVersions.getTargetVersions().isEmpty()); assertEquals(defaultVersion, infrastructureVersions.getTargetVersionFor(NodeType.config)); infrastructureVersions.setTargetVersion(NodeType.config, version, false); assertEquals(version, infrastructureVersions.getTargetVersionFor(NodeType.config)); Version new_version = Version.fromString("6.123.457"); infrastructureVersions.setTargetVersion(NodeType.config, new_version, false); assertEquals(new_version, infrastructureVersions.getTargetVersionFor(NodeType.config)); assertThrows(IllegalArgumentException.class, () -> infrastructureVersions.setTargetVersion(NodeType.config, version, false)); infrastructureVersions.setTargetVersion(NodeType.config, version, true); assertEquals(version, infrastructureVersions.getTargetVersionFor(NodeType.config)); }
Version new_version = Version.fromString("6.123.457");
public void can_only_downgrade_with_force() { assertTrue(infrastructureVersions.getTargetVersions().isEmpty()); assertEquals(defaultVersion, infrastructureVersions.getTargetVersionFor(NodeType.config)); infrastructureVersions.setTargetVersion(NodeType.config, version, false); assertEquals(version, infrastructureVersions.getTargetVersionFor(NodeType.config)); Version newVersion = Version.fromString("6.123.457"); infrastructureVersions.setTargetVersion(NodeType.config, newVersion, false); assertEquals(newVersion, infrastructureVersions.getTargetVersionFor(NodeType.config)); assertThrows(IllegalArgumentException.class, () -> infrastructureVersions.setTargetVersion(NodeType.config, version, false)); infrastructureVersions.setTargetVersion(NodeType.config, version, true); assertEquals(version, infrastructureVersions.getTargetVersionFor(NodeType.config)); }
class InfrastructureVersionsTest { private final Version defaultVersion = Version.fromString("6.13.37"); private final NodeRepositoryTester tester = new NodeRepositoryTester(); private final InfrastructureVersions infrastructureVersions = new InfrastructureVersions(tester.nodeRepository().database(), NodeType.config, defaultVersion); private final Version version = Version.fromString("6.123.456"); @Test @Test public void can_only_set_version_on_certain_node_types() { infrastructureVersions.setTargetVersion(NodeType.config, version, false); assertThrows(IllegalArgumentException.class, () -> infrastructureVersions.setTargetVersion(NodeType.tenant, version, false)); assertThrows(IllegalArgumentException.class, () -> infrastructureVersions.setTargetVersion(NodeType.tenant, version, true)); } @Test public void store_all_valid_for_config() { infrastructureVersions.setTargetVersion(NodeType.config, version, false); infrastructureVersions.setTargetVersion(NodeType.confighost, version, false); infrastructureVersions.setTargetVersion(NodeType.proxyhost, version, false); assertThrows(IllegalArgumentException.class, () -> infrastructureVersions.setTargetVersion(NodeType.controller, version, false)); assertThrows(IllegalArgumentException.class, () -> infrastructureVersions.setTargetVersion(NodeType.controllerhost, version, false)); Map<NodeType, Version> expected = Map.of( NodeType.config, version, NodeType.confighost, version, NodeType.proxyhost, version); assertEquals(expected, infrastructureVersions.getTargetVersions()); } @Test public void store_all_valid_for_controller() { InfrastructureVersions infrastructureVersions = new InfrastructureVersions(tester.nodeRepository().database(), NodeType.controller, defaultVersion); infrastructureVersions.setTargetVersion(NodeType.controller, version, false); infrastructureVersions.setTargetVersion(NodeType.controllerhost, version, false); infrastructureVersions.setTargetVersion(NodeType.proxyhost, version, false); 
assertThrows(IllegalArgumentException.class, () -> infrastructureVersions.setTargetVersion(NodeType.config, version, false)); assertThrows(IllegalArgumentException.class, () -> infrastructureVersions.setTargetVersion(NodeType.confighost, version, false)); Map<NodeType, Version> expected = Map.of( NodeType.controller, version, NodeType.controllerhost, version, NodeType.proxyhost, version); assertEquals(expected, infrastructureVersions.getTargetVersions()); } private static void assertThrows(Class<? extends Throwable> clazz, Runnable runnable) { try { runnable.run(); fail("Expected " + clazz); } catch (Throwable e) { if (!clazz.isInstance(e)) throw e; } } }
class InfrastructureVersionsTest { private final Version defaultVersion = Version.fromString("6.13.37"); private final NodeRepositoryTester tester = new NodeRepositoryTester(); private final InfrastructureVersions infrastructureVersions = new InfrastructureVersions(tester.nodeRepository().database(), NodeType.config, defaultVersion); private final Version version = Version.fromString("6.123.456"); @Test @Test public void can_only_set_version_on_certain_node_types() { infrastructureVersions.setTargetVersion(NodeType.config, version, false); assertThrows(IllegalArgumentException.class, () -> infrastructureVersions.setTargetVersion(NodeType.tenant, version, false)); assertThrows(IllegalArgumentException.class, () -> infrastructureVersions.setTargetVersion(NodeType.tenant, version, true)); } @Test public void store_all_valid_for_config() { infrastructureVersions.setTargetVersion(NodeType.config, version, false); infrastructureVersions.setTargetVersion(NodeType.confighost, version, false); infrastructureVersions.setTargetVersion(NodeType.proxyhost, version, false); assertThrows(IllegalArgumentException.class, () -> infrastructureVersions.setTargetVersion(NodeType.controller, version, false)); assertThrows(IllegalArgumentException.class, () -> infrastructureVersions.setTargetVersion(NodeType.controllerhost, version, false)); Map<NodeType, Version> expected = Map.of( NodeType.config, version, NodeType.confighost, version, NodeType.proxyhost, version); assertEquals(expected, infrastructureVersions.getTargetVersions()); } @Test public void store_all_valid_for_controller() { InfrastructureVersions infrastructureVersions = new InfrastructureVersions(tester.nodeRepository().database(), NodeType.controller, defaultVersion); infrastructureVersions.setTargetVersion(NodeType.controller, version, false); infrastructureVersions.setTargetVersion(NodeType.controllerhost, version, false); infrastructureVersions.setTargetVersion(NodeType.proxyhost, version, false); 
assertThrows(IllegalArgumentException.class, () -> infrastructureVersions.setTargetVersion(NodeType.config, version, false)); assertThrows(IllegalArgumentException.class, () -> infrastructureVersions.setTargetVersion(NodeType.confighost, version, false)); Map<NodeType, Version> expected = Map.of( NodeType.controller, version, NodeType.controllerhost, version, NodeType.proxyhost, version); assertEquals(expected, infrastructureVersions.getTargetVersions()); } private static void assertThrows(Class<? extends Throwable> clazz, Runnable runnable) { try { runnable.run(); fail("Expected " + clazz); } catch (Throwable e) { if (!clazz.isInstance(e)) throw e; } } }
How many logserver-containers services/processes/configids are there in an application? Always One?
Optional<String> findSlobrokServiceName(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "adminserver": case "config-sentinel": case "configproxy": case "configserver": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": return Optional.empty(); case "topleveldispatch": return Optional.of(configId.s()); case "qrserver": case "container": case "container-clustercontroller": case "logserver-container": case "metricsproxy-container": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." + configId.s()); default: logger.log(LogLevel.DEBUG, "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } }
case "logserver-container":
Optional<String> findSlobrokServiceName(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "adminserver": case "config-sentinel": case "configproxy": case "configserver": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": return Optional.empty(); case "topleveldispatch": return Optional.of(configId.s()); case "qrserver": case "container": case "container-clustercontroller": case "logserver-container": case "metricsproxy-container": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." + configId.s()); default: logger.log(LogLevel.DEBUG, "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } }
class SlobrokMonitorManagerImpl implements SlobrokApi, MonitorManager { private static final Logger logger = Logger.getLogger(SlobrokMonitorManagerImpl.class.getName()); private final Supplier<SlobrokMonitor> slobrokMonitorFactory; private final Object monitor = new Object(); private final HashMap<ApplicationId, SlobrokMonitor> slobrokMonitors = new HashMap<>(); private final DuperModelManager duperModel; @Inject public SlobrokMonitorManagerImpl(DuperModelManager duperModel) { this(SlobrokMonitor::new, duperModel); } SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, DuperModelManager duperModel) { this.slobrokMonitorFactory = slobrokMonitorFactory; this.duperModel = duperModel; } @Override public void applicationActivated(ApplicationInfo application) { if (wouldNotMonitor(application.getApplicationId())) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.computeIfAbsent( application.getApplicationId(), id -> slobrokMonitorFactory.get()); slobrokMonitor.updateSlobrokList(application); } } @Override public void applicationRemoved(ApplicationId id) { if (wouldNotMonitor(id)) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.remove(id); if (slobrokMonitor == null) { logger.log(LogLevel.WARNING, "Removed application " + id + ", but it was never registered"); } else { slobrokMonitor.close(); } } } @Override public List<Mirror.Entry> lookup(ApplicationId id, String pattern) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(id); if (slobrokMonitor == null) { throw new IllegalArgumentException("Slobrok manager has no knowledge of application " + id); } else { return slobrokMonitor.lookup(pattern); } } } @Override public ServiceStatusInfo getStatus(ApplicationId applicationId, ClusterId clusterId, ServiceType serviceType, ConfigId configId) { if (wouldNotMonitor(applicationId)) { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } Optional<String> 
slobrokServiceName = findSlobrokServiceName(serviceType, configId); if (slobrokServiceName.isPresent()) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(applicationId); if (slobrokMonitor != null && slobrokMonitor.registeredInSlobrok(slobrokServiceName.get())) { return new ServiceStatusInfo(ServiceStatus.UP); } else { return new ServiceStatusInfo(ServiceStatus.DOWN); } } } else { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } } private boolean wouldNotMonitor(ApplicationId applicationId) { return duperModel.isSupportedInfraApplication(applicationId); } /** * Get the Slobrok service name of the service, or empty if the service * is not registered with Slobrok. */ }
class SlobrokMonitorManagerImpl implements SlobrokApi, MonitorManager { private static final Logger logger = Logger.getLogger(SlobrokMonitorManagerImpl.class.getName()); private final Supplier<SlobrokMonitor> slobrokMonitorFactory; private final Object monitor = new Object(); private final HashMap<ApplicationId, SlobrokMonitor> slobrokMonitors = new HashMap<>(); private final DuperModelManager duperModel; @Inject public SlobrokMonitorManagerImpl(DuperModelManager duperModel) { this(SlobrokMonitor::new, duperModel); } SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, DuperModelManager duperModel) { this.slobrokMonitorFactory = slobrokMonitorFactory; this.duperModel = duperModel; } @Override public void applicationActivated(ApplicationInfo application) { if (wouldNotMonitor(application.getApplicationId())) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.computeIfAbsent( application.getApplicationId(), id -> slobrokMonitorFactory.get()); slobrokMonitor.updateSlobrokList(application); } } @Override public void applicationRemoved(ApplicationId id) { if (wouldNotMonitor(id)) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.remove(id); if (slobrokMonitor == null) { logger.log(LogLevel.WARNING, "Removed application " + id + ", but it was never registered"); } else { slobrokMonitor.close(); } } } @Override public List<Mirror.Entry> lookup(ApplicationId id, String pattern) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(id); if (slobrokMonitor == null) { throw new IllegalArgumentException("Slobrok manager has no knowledge of application " + id); } else { return slobrokMonitor.lookup(pattern); } } } @Override public ServiceStatusInfo getStatus(ApplicationId applicationId, ClusterId clusterId, ServiceType serviceType, ConfigId configId) { if (wouldNotMonitor(applicationId)) { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } Optional<String> 
slobrokServiceName = findSlobrokServiceName(serviceType, configId); if (slobrokServiceName.isPresent()) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(applicationId); if (slobrokMonitor != null && slobrokMonitor.registeredInSlobrok(slobrokServiceName.get())) { return new ServiceStatusInfo(ServiceStatus.UP); } else { return new ServiceStatusInfo(ServiceStatus.DOWN); } } } else { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } } private boolean wouldNotMonitor(ApplicationId applicationId) { return duperModel.isSupportedInfraApplication(applicationId); } /** * Get the Slobrok service name of the service, or empty if the service * is not registered with Slobrok. */ }
0 or 1 (only Vespa 6 applications have 0)
Optional<String> findSlobrokServiceName(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "adminserver": case "config-sentinel": case "configproxy": case "configserver": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": return Optional.empty(); case "topleveldispatch": return Optional.of(configId.s()); case "qrserver": case "container": case "container-clustercontroller": case "logserver-container": case "metricsproxy-container": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." + configId.s()); default: logger.log(LogLevel.DEBUG, "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } }
case "logserver-container":
Optional<String> findSlobrokServiceName(ServiceType serviceType, ConfigId configId) { switch (serviceType.s()) { case "adminserver": case "config-sentinel": case "configproxy": case "configserver": case "logd": case "logserver": case "metricsproxy": case "slobrok": case "transactionlogserver": return Optional.empty(); case "topleveldispatch": return Optional.of(configId.s()); case "qrserver": case "container": case "container-clustercontroller": case "logserver-container": case "metricsproxy-container": return Optional.of("vespa/service/" + configId.s()); case "searchnode": return Optional.of(configId.s() + "/realtimecontroller"); case "distributor": case "storagenode": return Optional.of("storage/cluster." + configId.s()); default: logger.log(LogLevel.DEBUG, "Unknown service type " + serviceType.s() + " with config id " + configId.s()); return Optional.empty(); } }
class SlobrokMonitorManagerImpl implements SlobrokApi, MonitorManager { private static final Logger logger = Logger.getLogger(SlobrokMonitorManagerImpl.class.getName()); private final Supplier<SlobrokMonitor> slobrokMonitorFactory; private final Object monitor = new Object(); private final HashMap<ApplicationId, SlobrokMonitor> slobrokMonitors = new HashMap<>(); private final DuperModelManager duperModel; @Inject public SlobrokMonitorManagerImpl(DuperModelManager duperModel) { this(SlobrokMonitor::new, duperModel); } SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, DuperModelManager duperModel) { this.slobrokMonitorFactory = slobrokMonitorFactory; this.duperModel = duperModel; } @Override public void applicationActivated(ApplicationInfo application) { if (wouldNotMonitor(application.getApplicationId())) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.computeIfAbsent( application.getApplicationId(), id -> slobrokMonitorFactory.get()); slobrokMonitor.updateSlobrokList(application); } } @Override public void applicationRemoved(ApplicationId id) { if (wouldNotMonitor(id)) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.remove(id); if (slobrokMonitor == null) { logger.log(LogLevel.WARNING, "Removed application " + id + ", but it was never registered"); } else { slobrokMonitor.close(); } } } @Override public List<Mirror.Entry> lookup(ApplicationId id, String pattern) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(id); if (slobrokMonitor == null) { throw new IllegalArgumentException("Slobrok manager has no knowledge of application " + id); } else { return slobrokMonitor.lookup(pattern); } } } @Override public ServiceStatusInfo getStatus(ApplicationId applicationId, ClusterId clusterId, ServiceType serviceType, ConfigId configId) { if (wouldNotMonitor(applicationId)) { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } Optional<String> 
slobrokServiceName = findSlobrokServiceName(serviceType, configId); if (slobrokServiceName.isPresent()) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(applicationId); if (slobrokMonitor != null && slobrokMonitor.registeredInSlobrok(slobrokServiceName.get())) { return new ServiceStatusInfo(ServiceStatus.UP); } else { return new ServiceStatusInfo(ServiceStatus.DOWN); } } } else { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } } private boolean wouldNotMonitor(ApplicationId applicationId) { return duperModel.isSupportedInfraApplication(applicationId); } /** * Get the Slobrok service name of the service, or empty if the service * is not registered with Slobrok. */ }
class SlobrokMonitorManagerImpl implements SlobrokApi, MonitorManager { private static final Logger logger = Logger.getLogger(SlobrokMonitorManagerImpl.class.getName()); private final Supplier<SlobrokMonitor> slobrokMonitorFactory; private final Object monitor = new Object(); private final HashMap<ApplicationId, SlobrokMonitor> slobrokMonitors = new HashMap<>(); private final DuperModelManager duperModel; @Inject public SlobrokMonitorManagerImpl(DuperModelManager duperModel) { this(SlobrokMonitor::new, duperModel); } SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, DuperModelManager duperModel) { this.slobrokMonitorFactory = slobrokMonitorFactory; this.duperModel = duperModel; } @Override public void applicationActivated(ApplicationInfo application) { if (wouldNotMonitor(application.getApplicationId())) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.computeIfAbsent( application.getApplicationId(), id -> slobrokMonitorFactory.get()); slobrokMonitor.updateSlobrokList(application); } } @Override public void applicationRemoved(ApplicationId id) { if (wouldNotMonitor(id)) { return; } synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.remove(id); if (slobrokMonitor == null) { logger.log(LogLevel.WARNING, "Removed application " + id + ", but it was never registered"); } else { slobrokMonitor.close(); } } } @Override public List<Mirror.Entry> lookup(ApplicationId id, String pattern) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(id); if (slobrokMonitor == null) { throw new IllegalArgumentException("Slobrok manager has no knowledge of application " + id); } else { return slobrokMonitor.lookup(pattern); } } } @Override public ServiceStatusInfo getStatus(ApplicationId applicationId, ClusterId clusterId, ServiceType serviceType, ConfigId configId) { if (wouldNotMonitor(applicationId)) { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } Optional<String> 
slobrokServiceName = findSlobrokServiceName(serviceType, configId); if (slobrokServiceName.isPresent()) { synchronized (monitor) { SlobrokMonitor slobrokMonitor = slobrokMonitors.get(applicationId); if (slobrokMonitor != null && slobrokMonitor.registeredInSlobrok(slobrokServiceName.get())) { return new ServiceStatusInfo(ServiceStatus.UP); } else { return new ServiceStatusInfo(ServiceStatus.DOWN); } } } else { return new ServiceStatusInfo(ServiceStatus.NOT_CHECKED); } } private boolean wouldNotMonitor(ApplicationId applicationId) { return duperModel.isSupportedInfraApplication(applicationId); } /** * Get the Slobrok service name of the service, or empty if the service * is not registered with Slobrok. */ }
Should throw for null parameters (`Objects.requireNonNull`).
public Endpoint(Optional<String> endpointId, String containerId, Set<String> regions) { this.endpointId = endpointId; this.containerId = containerId; this.regions = Set.copyOf(regions); }
this.regions = Set.copyOf(regions);
public Endpoint(Optional<String> endpointId, String containerId, Set<String> regions) { this.endpointId = endpointId; this.containerId = containerId; this.regions = Set.copyOf( Objects.requireNonNull( regions.stream().map(RegionName::from).collect(Collectors.toList()), "Missing 'regions' parameter")); }
class Endpoint { private final Optional<String> endpointId; private final String containerId; private final Set<String> regions; public String endpointId() { return endpointId.orElse(containerId); } public String containerId() { return containerId; } public Set<String> regions() { return regions; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Endpoint endpoint = (Endpoint) o; return Objects.equals(endpointId, endpoint.endpointId) && Objects.equals(containerId, endpoint.containerId) && Objects.equals(regions, endpoint.regions); } @Override public int hashCode() { return Objects.hash(endpointId, containerId, regions); } }
class Endpoint { private final Optional<String> endpointId; private final String containerId; private final Set<RegionName> regions; public String endpointId() { return endpointId.orElse(containerId); } public String containerId() { return containerId; } public Set<RegionName> regions() { return regions; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Endpoint endpoint = (Endpoint) o; return Objects.equals(endpointId, endpoint.endpointId) && Objects.equals(containerId, endpoint.containerId) && Objects.equals(regions, endpoint.regions); } @Override public int hashCode() { return Objects.hash(endpointId, containerId, regions); } }
This should should check if region is already present in `regions` and throw. A duplicate region is likely a mistake/typo by the tenant.
private List<Endpoint> readEndpoints(Element root) { final var endpointsElement = XML.getChild(root, endpointsTag); if (endpointsElement == null) { return Collections.emptyList(); } final var endpoints = new ArrayList<Endpoint>(); for (var endpointElement : XML.getChildren(endpointsElement, endpointTag)) { final Optional<String> rotationId = stringAttribute("id", endpointElement); final Optional<String> containerId = stringAttribute("container-id", endpointElement); final var regions = new HashSet<String>(); if (containerId.isEmpty()) { throw new IllegalArgumentException("Missing 'container-id' from 'endpoint' tag."); } for (var regionElement : XML.getChildren(endpointElement, "region")) { var region = regionElement.getTextContent(); if (region == null || region.isEmpty() || region.isBlank()) { throw new IllegalArgumentException("Empty 'region' element in 'endpoint' tag."); } regions.add(regionElement.getTextContent()); } endpoints.add(new Endpoint(rotationId, containerId.get(), regions)); } return endpoints; }
regions.add(regionElement.getTextContent());
private List<Endpoint> readEndpoints(Element root) { final var endpointsElement = XML.getChild(root, endpointsTag); if (endpointsElement == null) { return Collections.emptyList(); } final var endpoints = new ArrayList<Endpoint>(); for (var endpointElement : XML.getChildren(endpointsElement, endpointTag)) { final Optional<String> rotationId = stringAttribute("id", endpointElement); final Optional<String> containerId = stringAttribute("container-id", endpointElement); final var regions = new HashSet<String>(); if (containerId.isEmpty()) { throw new IllegalArgumentException("Missing 'container-id' from 'endpoint' tag."); } for (var regionElement : XML.getChildren(endpointElement, "region")) { var region = regionElement.getTextContent(); if (region == null || region.isEmpty() || region.isBlank()) { throw new IllegalArgumentException("Empty 'region' element in 'endpoint' tag."); } if (regions.contains(region)) { throw new IllegalArgumentException("Duplicate 'region' element in 'endpoint' tag: " + region); } regions.add(region); } endpoints.add(new Endpoint(rotationId, containerId.get(), regions)); } return endpoints; }
class DeploymentSpecXmlReader { private static final String majorVersionTag = "major-version"; private static final String testTag = "test"; private static final String stagingTag = "staging"; private static final String blockChangeTag = "block-change"; private static final String prodTag = "prod"; private static final String endpointsTag = "endpoints"; private static final String endpointTag = "endpoint"; private final boolean validate; /** * Creates a validating reader */ public DeploymentSpecXmlReader() { this(true); } /** * Creates a reader * * @param validate true to validate the input, false to accept any input which can be unabiguously parsed */ public DeploymentSpecXmlReader(boolean validate) { this.validate = validate; } public DeploymentSpec read(Reader reader) { try { return read(IOUtils.readAll(reader)); } catch (IOException e) { throw new IllegalArgumentException("Could not read deployment spec", e); } } /** * Reads a deployment spec from XML */ public DeploymentSpec read(String xmlForm) { List<Step> steps = new ArrayList<>(); Optional<String> globalServiceId = Optional.empty(); Element root = XML.getDocument(xmlForm).getDocumentElement(); if (validate) validateTagOrder(root); for (Element environmentTag : XML.getChildren(root)) { if (!isEnvironmentName(environmentTag.getTagName())) continue; Environment environment = Environment.from(environmentTag.getTagName()); Optional<AthenzService> athenzService = stringAttribute("athenz-service", environmentTag).map(AthenzService::from); Optional<String> testerFlavor = stringAttribute("tester-flavor", environmentTag); if (environment == Environment.prod) { for (Element stepTag : XML.getChildren(environmentTag)) { if (stepTag.getTagName().equals("delay")) { steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 + longAttribute("minutes", stepTag) * 60 + longAttribute("seconds", stepTag)))); } else if (stepTag.getTagName().equals("parallel")) { List<DeclaredZone> zones = new 
ArrayList<>(); for (Element regionTag : XML.getChildren(stepTag)) { zones.add(readDeclaredZone(environment, athenzService, testerFlavor, regionTag)); } steps.add(new ParallelZones(zones)); } else { steps.add(readDeclaredZone(environment, athenzService, testerFlavor, stepTag)); } } } else { steps.add(new DeclaredZone(environment, Optional.empty(), false, athenzService, testerFlavor)); } if (environment == Environment.prod) globalServiceId = readGlobalServiceId(environmentTag); else if (readGlobalServiceId(environmentTag).isPresent()) throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag."); } Optional<AthenzDomain> athenzDomain = stringAttribute("athenz-domain", root).map(AthenzDomain::from); Optional<AthenzService> athenzService = stringAttribute("athenz-service", root).map(AthenzService::from); return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), optionalIntegerAttribute(majorVersionTag, root), readChangeBlockers(root), steps, xmlForm, athenzDomain, athenzService, readNotifications(root), readEndpoints(root)); } private Notifications readNotifications(Element root) { Element notificationsElement = XML.getChild(root, "notifications"); if (notificationsElement == null) return Notifications.none(); When defaultWhen = stringAttribute("when", notificationsElement).map(When::fromValue).orElse(When.failingCommit); Map<When, List<String>> emailAddresses = new HashMap<>(); Map<When, List<Role>> emailRoles = new HashMap<>(); for (When when : When.values()) { emailAddresses.put(when, new ArrayList<>()); emailRoles.put(when, new ArrayList<>()); } for (Element emailElement : XML.getChildren(notificationsElement, "email")) { Optional<String> addressAttribute = stringAttribute("address", emailElement); Optional<Role> roleAttribute = stringAttribute("role", emailElement).map(Role::fromValue); When when = stringAttribute("when", emailElement).map(When::fromValue).orElse(defaultWhen); if (addressAttribute.isPresent() == 
roleAttribute.isPresent()) throw new IllegalArgumentException("Exactly one of 'role' and 'address' must be present in 'email' elements."); addressAttribute.ifPresent(address -> emailAddresses.get(when).add(address)); roleAttribute.ifPresent(role -> emailRoles.get(when).add(role)); } return Notifications.of(emailAddresses, emailRoles); } /** * Imposes some constraints on tag order which are not expressible in the schema */ private void validateTagOrder(Element root) { List<String> tags = XML.getChildren(root).stream().map(Element::getTagName).collect(Collectors.toList()); for (int i = 0; i < tags.size(); i++) { if (tags.get(i).equals(blockChangeTag)) { String constraint = "<block-change> must be placed after <test> and <staging> and before <prod>"; if (containsAfter(i, testTag, tags)) throw new IllegalArgumentException(constraint); if (containsAfter(i, stagingTag, tags)) throw new IllegalArgumentException(constraint); if (containsBefore(i, prodTag, tags)) throw new IllegalArgumentException(constraint); } } } private boolean containsAfter(int i, String item, List<String> items) { return items.subList(i + 1, items.size()).contains(item); } private boolean containsBefore(int i, String item, List<String> items) { return items.subList(0, i).contains(item); } /** * Returns the given attribute as an integer, or 0 if it is not present */ private long longAttribute(String attributeName, Element tag) { String value = tag.getAttribute(attributeName); if (value == null || value.isEmpty()) return 0; try { return Long.parseLong(value); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName + "' but got '" + value + "'"); } } /** * Returns the given attribute as an integer, or 0 if it is not present */ private Optional<Integer> optionalIntegerAttribute(String attributeName, Element tag) { String value = tag.getAttribute(attributeName); if (value == null || value.isEmpty()) return Optional.empty(); try { return 
Optional.of(Integer.parseInt(value)); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName + "' but got '" + value + "'"); } } /** * Returns the given attribute as a string, or Optional.empty if it is not present or empty */ private Optional<String> stringAttribute(String attributeName, Element tag) { String value = tag.getAttribute(attributeName); return Optional.ofNullable(value).filter(s -> !s.equals("")); } private boolean isEnvironmentName(String tagName) { return tagName.equals(testTag) || tagName.equals(stagingTag) || tagName.equals(prodTag); } private DeclaredZone readDeclaredZone(Environment environment, Optional<AthenzService> athenzService, Optional<String> testerFlavor, Element regionTag) { return new DeclaredZone(environment, Optional.of(RegionName.from(XML.getValue(regionTag).trim())), readActive(regionTag), athenzService, testerFlavor); } private Optional<String> readGlobalServiceId(Element environmentTag) { String globalServiceId = environmentTag.getAttribute("global-service-id"); if (globalServiceId == null || globalServiceId.isEmpty()) { return Optional.empty(); } else { return Optional.of(globalServiceId); } } private List<DeploymentSpec.ChangeBlocker> readChangeBlockers(Element root) { List<DeploymentSpec.ChangeBlocker> changeBlockers = new ArrayList<>(); for (Element tag : XML.getChildren(root)) { if (!blockChangeTag.equals(tag.getTagName())) continue; boolean blockVersions = trueOrMissing(tag.getAttribute("version")); boolean blockRevisions = trueOrMissing(tag.getAttribute("revision")); String daySpec = tag.getAttribute("days"); String hourSpec = tag.getAttribute("hours"); String zoneSpec = tag.getAttribute("time-zone"); if (zoneSpec.isEmpty()) { zoneSpec = "UTC"; } changeBlockers.add(new DeploymentSpec.ChangeBlocker(blockRevisions, blockVersions, TimeWindow.from(daySpec, hourSpec, zoneSpec))); } return Collections.unmodifiableList(changeBlockers); } /** * Returns true if 
the given value is "true", or if it is missing */ private boolean trueOrMissing(String value) { return value == null || value.isEmpty() || value.equals("true"); } private DeploymentSpec.UpgradePolicy readUpgradePolicy(Element root) { Element upgradeElement = XML.getChild(root, "upgrade"); if (upgradeElement == null) return DeploymentSpec.UpgradePolicy.defaultPolicy; String policy = upgradeElement.getAttribute("policy"); switch (policy) { case "canary": return DeploymentSpec.UpgradePolicy.canary; case "default": return DeploymentSpec.UpgradePolicy.defaultPolicy; case "conservative": return DeploymentSpec.UpgradePolicy.conservative; default: throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " + "Must be one of " + Arrays.toString(DeploymentSpec.UpgradePolicy.values())); } } private boolean readActive(Element regionTag) { String activeValue = regionTag.getAttribute("active"); if ("true".equals(activeValue)) return true; if ("false".equals(activeValue)) return false; throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " + "to control whether the region should receive production traffic"); } }
class DeploymentSpecXmlReader { private static final String majorVersionTag = "major-version"; private static final String testTag = "test"; private static final String stagingTag = "staging"; private static final String blockChangeTag = "block-change"; private static final String prodTag = "prod"; private static final String endpointsTag = "endpoints"; private static final String endpointTag = "endpoint"; private final boolean validate; /** * Creates a validating reader */ public DeploymentSpecXmlReader() { this(true); } /** * Creates a reader * * @param validate true to validate the input, false to accept any input which can be unabiguously parsed */ public DeploymentSpecXmlReader(boolean validate) { this.validate = validate; } public DeploymentSpec read(Reader reader) { try { return read(IOUtils.readAll(reader)); } catch (IOException e) { throw new IllegalArgumentException("Could not read deployment spec", e); } } /** * Reads a deployment spec from XML */ public DeploymentSpec read(String xmlForm) { List<Step> steps = new ArrayList<>(); Optional<String> globalServiceId = Optional.empty(); Element root = XML.getDocument(xmlForm).getDocumentElement(); if (validate) validateTagOrder(root); for (Element environmentTag : XML.getChildren(root)) { if (!isEnvironmentName(environmentTag.getTagName())) continue; Environment environment = Environment.from(environmentTag.getTagName()); Optional<AthenzService> athenzService = stringAttribute("athenz-service", environmentTag).map(AthenzService::from); Optional<String> testerFlavor = stringAttribute("tester-flavor", environmentTag); if (environment == Environment.prod) { for (Element stepTag : XML.getChildren(environmentTag)) { if (stepTag.getTagName().equals("delay")) { steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 + longAttribute("minutes", stepTag) * 60 + longAttribute("seconds", stepTag)))); } else if (stepTag.getTagName().equals("parallel")) { List<DeclaredZone> zones = new 
ArrayList<>(); for (Element regionTag : XML.getChildren(stepTag)) { zones.add(readDeclaredZone(environment, athenzService, testerFlavor, regionTag)); } steps.add(new ParallelZones(zones)); } else { steps.add(readDeclaredZone(environment, athenzService, testerFlavor, stepTag)); } } } else { steps.add(new DeclaredZone(environment, Optional.empty(), false, athenzService, testerFlavor)); } if (environment == Environment.prod) globalServiceId = readGlobalServiceId(environmentTag); else if (readGlobalServiceId(environmentTag).isPresent()) throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag."); } Optional<AthenzDomain> athenzDomain = stringAttribute("athenz-domain", root).map(AthenzDomain::from); Optional<AthenzService> athenzService = stringAttribute("athenz-service", root).map(AthenzService::from); return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), optionalIntegerAttribute(majorVersionTag, root), readChangeBlockers(root), steps, xmlForm, athenzDomain, athenzService, readNotifications(root), readEndpoints(root)); } private Notifications readNotifications(Element root) { Element notificationsElement = XML.getChild(root, "notifications"); if (notificationsElement == null) return Notifications.none(); When defaultWhen = stringAttribute("when", notificationsElement).map(When::fromValue).orElse(When.failingCommit); Map<When, List<String>> emailAddresses = new HashMap<>(); Map<When, List<Role>> emailRoles = new HashMap<>(); for (When when : When.values()) { emailAddresses.put(when, new ArrayList<>()); emailRoles.put(when, new ArrayList<>()); } for (Element emailElement : XML.getChildren(notificationsElement, "email")) { Optional<String> addressAttribute = stringAttribute("address", emailElement); Optional<Role> roleAttribute = stringAttribute("role", emailElement).map(Role::fromValue); When when = stringAttribute("when", emailElement).map(When::fromValue).orElse(defaultWhen); if (addressAttribute.isPresent() == 
roleAttribute.isPresent()) throw new IllegalArgumentException("Exactly one of 'role' and 'address' must be present in 'email' elements."); addressAttribute.ifPresent(address -> emailAddresses.get(when).add(address)); roleAttribute.ifPresent(role -> emailRoles.get(when).add(role)); } return Notifications.of(emailAddresses, emailRoles); } /** * Imposes some constraints on tag order which are not expressible in the schema */ private void validateTagOrder(Element root) { List<String> tags = XML.getChildren(root).stream().map(Element::getTagName).collect(Collectors.toList()); for (int i = 0; i < tags.size(); i++) { if (tags.get(i).equals(blockChangeTag)) { String constraint = "<block-change> must be placed after <test> and <staging> and before <prod>"; if (containsAfter(i, testTag, tags)) throw new IllegalArgumentException(constraint); if (containsAfter(i, stagingTag, tags)) throw new IllegalArgumentException(constraint); if (containsBefore(i, prodTag, tags)) throw new IllegalArgumentException(constraint); } } } private boolean containsAfter(int i, String item, List<String> items) { return items.subList(i + 1, items.size()).contains(item); } private boolean containsBefore(int i, String item, List<String> items) { return items.subList(0, i).contains(item); } /** * Returns the given attribute as an integer, or 0 if it is not present */ private long longAttribute(String attributeName, Element tag) { String value = tag.getAttribute(attributeName); if (value == null || value.isEmpty()) return 0; try { return Long.parseLong(value); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName + "' but got '" + value + "'"); } } /** * Returns the given attribute as an integer, or 0 if it is not present */ private Optional<Integer> optionalIntegerAttribute(String attributeName, Element tag) { String value = tag.getAttribute(attributeName); if (value == null || value.isEmpty()) return Optional.empty(); try { return 
Optional.of(Integer.parseInt(value)); } catch (NumberFormatException e) { throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName + "' but got '" + value + "'"); } } /** * Returns the given attribute as a string, or Optional.empty if it is not present or empty */ private Optional<String> stringAttribute(String attributeName, Element tag) { String value = tag.getAttribute(attributeName); return Optional.ofNullable(value).filter(s -> !s.equals("")); } private boolean isEnvironmentName(String tagName) { return tagName.equals(testTag) || tagName.equals(stagingTag) || tagName.equals(prodTag); } private DeclaredZone readDeclaredZone(Environment environment, Optional<AthenzService> athenzService, Optional<String> testerFlavor, Element regionTag) { return new DeclaredZone(environment, Optional.of(RegionName.from(XML.getValue(regionTag).trim())), readActive(regionTag), athenzService, testerFlavor); } private Optional<String> readGlobalServiceId(Element environmentTag) { String globalServiceId = environmentTag.getAttribute("global-service-id"); if (globalServiceId == null || globalServiceId.isEmpty()) { return Optional.empty(); } else { return Optional.of(globalServiceId); } } private List<DeploymentSpec.ChangeBlocker> readChangeBlockers(Element root) { List<DeploymentSpec.ChangeBlocker> changeBlockers = new ArrayList<>(); for (Element tag : XML.getChildren(root)) { if (!blockChangeTag.equals(tag.getTagName())) continue; boolean blockVersions = trueOrMissing(tag.getAttribute("version")); boolean blockRevisions = trueOrMissing(tag.getAttribute("revision")); String daySpec = tag.getAttribute("days"); String hourSpec = tag.getAttribute("hours"); String zoneSpec = tag.getAttribute("time-zone"); if (zoneSpec.isEmpty()) { zoneSpec = "UTC"; } changeBlockers.add(new DeploymentSpec.ChangeBlocker(blockRevisions, blockVersions, TimeWindow.from(daySpec, hourSpec, zoneSpec))); } return Collections.unmodifiableList(changeBlockers); } /** * Returns true if 
the given value is "true", or if it is missing */ private boolean trueOrMissing(String value) { return value == null || value.isEmpty() || value.equals("true"); } private DeploymentSpec.UpgradePolicy readUpgradePolicy(Element root) { Element upgradeElement = XML.getChild(root, "upgrade"); if (upgradeElement == null) return DeploymentSpec.UpgradePolicy.defaultPolicy; String policy = upgradeElement.getAttribute("policy"); switch (policy) { case "canary": return DeploymentSpec.UpgradePolicy.canary; case "default": return DeploymentSpec.UpgradePolicy.defaultPolicy; case "conservative": return DeploymentSpec.UpgradePolicy.conservative; default: throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " + "Must be one of " + Arrays.toString(DeploymentSpec.UpgradePolicy.values())); } } private boolean readActive(Element regionTag) { String activeValue = regionTag.getAttribute("active"); if ("true".equals(activeValue)) return true; if ("false".equals(activeValue)) return false; throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " + "to control whether the region should receive production traffic"); } }
Should add a test for this scenario, which I assume is "system without proxy app"?
protected boolean convergedOn(Version target, SystemApplication application, ZoneId zone) { Optional<Version> minVersion = minVersion(zone, application, Node::currentVersion); if (minVersion.isEmpty()) return true; return minVersion.get().equals(target) && application.configConvergedIn(zone, controller(), Optional.of(target)); }
if (minVersion.isEmpty()) return true;
protected boolean convergedOn(Version target, SystemApplication application, ZoneId zone) { Optional<Version> minVersion = minVersion(zone, application, Node::currentVersion); if (minVersion.isEmpty()) return true; return minVersion.get().equals(target) && application.configConvergedIn(zone, controller(), Optional.of(target)); }
class SystemUpgrader extends InfrastructureUpgrader { private static final Logger log = Logger.getLogger(SystemUpgrader.class.getName()); private static final Set<Node.State> upgradableNodeStates = EnumSet.of(Node.State.active, Node.State.reserved); public SystemUpgrader(Controller controller, Duration interval, JobControl jobControl) { super(controller, interval, jobControl, controller.zoneRegistry().upgradePolicy(), null); } @Override protected void upgrade(Version target, SystemApplication application, ZoneId zone) { if (minVersion(zone, application, Node::wantedVersion).map(target::isAfter) .orElse(true)) { log.info(String.format("Deploying %s version %s in %s", application.id(), target, zone)); controller().applications().deploy(application, zone, target); } } @Override @Override protected boolean requireUpgradeOf(Node node, SystemApplication application, ZoneId zone) { return eligibleForUpgrade(node); } @Override protected Optional<Version> targetVersion() { return controller().versionStatus().controllerVersion() .filter(vespaVersion -> !vespaVersion.isSystemVersion()) .filter(vespaVersion -> vespaVersion.confidence() != VespaVersion.Confidence.broken) .map(VespaVersion::versionNumber); } /** Returns whether node in application should be upgraded by this */ public static boolean eligibleForUpgrade(Node node) { return upgradableNodeStates.contains(node.state()); } }
class SystemUpgrader extends InfrastructureUpgrader { private static final Logger log = Logger.getLogger(SystemUpgrader.class.getName()); private static final Set<Node.State> upgradableNodeStates = EnumSet.of(Node.State.active, Node.State.reserved); public SystemUpgrader(Controller controller, Duration interval, JobControl jobControl) { super(controller, interval, jobControl, controller.zoneRegistry().upgradePolicy(), null); } @Override protected void upgrade(Version target, SystemApplication application, ZoneId zone) { if (minVersion(zone, application, Node::wantedVersion).map(target::isAfter) .orElse(true)) { log.info(String.format("Deploying %s version %s in %s", application.id(), target, zone)); controller().applications().deploy(application, zone, target); } } @Override @Override protected boolean requireUpgradeOf(Node node, SystemApplication application, ZoneId zone) { return eligibleForUpgrade(node); } @Override protected Optional<Version> targetVersion() { return controller().versionStatus().controllerVersion() .filter(vespaVersion -> !vespaVersion.isSystemVersion()) .filter(vespaVersion -> vespaVersion.confidence() != VespaVersion.Confidence.broken) .map(VespaVersion::versionNumber); } /** Returns whether node in application should be upgraded by this */ public static boolean eligibleForUpgrade(Node node) { return upgradableNodeStates.contains(node.state()); } }
The `legacyRotation` field isn't really needed as we could've just written `deprecatedRotationField` directly from `application.rotations()` here. This is fine for now though, but let's remove it in the next version.
public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setLong(createdAtField, application.createdAt().toEpochMilli()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.change(), root, deployingField); toSlime(application.outstandingChange(), root, outstandingChangeField); application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value())); application.owner().ifPresent(owner -> root.setString(ownerField, owner.username())); application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion)); root.setDouble(queryQualityField, application.metrics().queryServiceQuality()); root.setDouble(writeQualityField, application.metrics().writeServiceQuality()); application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey)); application.legacyRotation().ifPresent(rotation -> root.setString(deprecatedRotationField, rotation.asString())); Cursor rotations = root.setArray(rotationsField); application.rotations().forEach(rotation -> rotations.addString(rotation.asString())); toSlime(application.rotationStatus(), root.setArray(rotationStatusField)); return slime; }
application.legacyRotation().ifPresent(rotation -> root.setString(deprecatedRotationField, rotation.asString()));
public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setLong(createdAtField, application.createdAt().toEpochMilli()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.change(), root, deployingField); toSlime(application.outstandingChange(), root, outstandingChangeField); application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value())); application.owner().ifPresent(owner -> root.setString(ownerField, owner.username())); application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion)); root.setDouble(queryQualityField, application.metrics().queryServiceQuality()); root.setDouble(writeQualityField, application.metrics().writeServiceQuality()); application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey)); application.legacyRotation().ifPresent(rotation -> root.setString(deprecatedRotationField, rotation.asString())); Cursor rotations = root.setArray(rotationsField); application.rotations().forEach(rotation -> rotations.addString(rotation.asString())); toSlime(application.rotationStatus(), root.setArray(rotationStatusField)); return slime; }
/**
 * Serializes {@link Application} state to and from Slime.
 *
 * The string constants below are the wire-format field names. They must stay stable:
 * changing any of them breaks deserialization of previously stored applications.
 * NOTE(review): a few literals look accidental but appear to be kept for compatibility —
 * e.g. deploymentSpecField stores under the literal "deploymentSpecField", and
 * sourceApplicationField reuses the same literal ("sourceRevision") as sourceRevisionField.
 * Confirm before "fixing" any of them.
 */
class ApplicationSerializer {

    // ------------ Application-level field names ------------
    private final String idField = "id";
    private final String createdAtField = "createdAt";
    private final String deploymentSpecField = "deploymentSpecField";
    private final String validationOverridesField = "validationOverrides";
    private final String deploymentsField = "deployments";
    private final String deploymentJobsField = "deploymentJobs";
    private final String deployingField = "deployingField";
    private final String pinnedField = "pinned";
    private final String outstandingChangeField = "outstandingChangeField";
    private final String ownershipIssueIdField = "ownershipIssueId";
    private final String ownerField = "confirmedOwner";
    private final String majorVersionField = "majorVersion";
    private final String writeQualityField = "writeQuality";
    private final String queryQualityField = "queryQuality";
    private final String pemDeployKeyField = "pemDeployKey";
    private final String rotationsField = "endpoints";
    // Legacy single-rotation field; superseded by rotationsField but still read and written
    private final String deprecatedRotationField = "rotation";
    private final String rotationStatusField = "rotationStatus";

    // ------------ Deployment-level field names ------------
    private final String zoneField = "zone";
    private final String environmentField = "environment";
    private final String regionField = "region";
    private final String deployTimeField = "deployTime";
    private final String applicationBuildNumberField = "applicationBuildNumber";
    private final String applicationPackageRevisionField = "applicationPackageRevision";
    private final String sourceRevisionField = "sourceRevision";
    private final String repositoryField = "repositoryField";
    private final String branchField = "branchField";
    private final String commitField = "commitField";
    private final String authorEmailField = "authorEmailField";
    private final String compileVersionField = "compileVersion";
    private final String buildTimeField = "buildTime";
    private final String lastQueriedField = "lastQueried";
    private final String lastWrittenField = "lastWritten";
    private final String lastQueriesPerSecondField = "lastQueriesPerSecond";
    private final String lastWritesPerSecondField = "lastWritesPerSecond";

    // ------------ DeploymentJobs field names ------------
    private final String projectIdField = "projectId";
    private final String jobStatusField = "jobStatus";
    private final String issueIdField = "jiraIssueId";
    private final String builtInternallyField = "builtInternally";

    // ------------ JobStatus field names ------------
    private final String jobTypeField = "jobType";
    private final String errorField = "jobError";
    private final String lastTriggeredField = "lastTriggered";
    private final String lastCompletedField = "lastCompleted";
    private final String firstFailingField = "firstFailing";
    private final String lastSuccessField = "lastSuccess";
    private final String pausedUntilField = "pausedUntil";

    // ------------ JobRun field names ------------
    private final String jobRunIdField = "id";
    private final String versionField = "version";
    private final String revisionField = "revision";
    private final String sourceVersionField = "sourceVersion";
    // NOTE(review): same wire literal as sourceRevisionField — presumably intentional; confirm before changing
    private final String sourceApplicationField = "sourceRevision";
    private final String reasonField = "reason";
    private final String atField = "at";

    // ------------ ClusterInfo field names ------------
    private final String clusterInfoField = "clusterInfo";
    private final String clusterInfoFlavorField = "flavor";
    private final String clusterInfoCostField = "cost";
    private final String clusterInfoCpuField = "flavorCpu";
    private final String clusterInfoMemField = "flavorMem";
    private final String clusterInfoDiskField = "flavorDisk";
    private final String clusterInfoTypeField = "clusterType";
    private final String clusterInfoHostnamesField = "hostnames";

    // ------------ ClusterUtilization field names ------------
    private final String clusterUtilsField = "clusterUtils";
    private final String clusterUtilsCpuField = "cpu";
    private final String clusterUtilsMemField = "mem";
    private final String clusterUtilsDiskField = "disk";
    private final String clusterUtilsDiskBusyField = "diskbusy";

    // ------------ DeploymentMetrics field names ------------
    private final String deploymentMetricsField = "metrics";
    private final String deploymentMetricsQPSField = "queriesPerSecond";
    private final String deploymentMetricsWPSField = "writesPerSecond";
    private final String deploymentMetricsDocsField = "documentCount";
    private final String deploymentMetricsQueryLatencyField = "queryLatencyMillis";
    private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis";
    private final String deploymentMetricsUpdateTime = "lastUpdated";
    private final String deploymentMetricsWarningsField = "warnings";

    // ------------ Serialization ------------

    /** Writes each deployment as one object in the given array. */
    private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) {
        for (Deployment deployment : deployments)
            deploymentToSlime(deployment, array.addObject());
    }

    /** Writes a single deployment: zone, version, time, revision, cluster data, metrics and activity. */
    private void deploymentToSlime(Deployment deployment, Cursor object) {
        zoneIdToSlime(deployment.zone(), object.setObject(zoneField));
        object.setString(versionField, deployment.version().toString());
        object.setLong(deployTimeField, deployment.at().toEpochMilli());
        toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField));
        clusterInfoToSlime(deployment.clusterInfo(), object);
        clusterUtilsToSlime(deployment.clusterUtils(), object);
        deploymentMetricsToSlime(deployment.metrics(), object);
        // Activity fields are optional and only written when present
        deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value));
    }

    /** Writes deployment metrics under a "metrics" sub-object; warnings only when non-empty. */
    private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) {
        Cursor root = object.setObject(deploymentMetricsField);
        root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond());
        root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond());
        root.setDouble(deploymentMetricsDocsField, metrics.documentCount());
        root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis());
        root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli()));
        if (!metrics.warnings().isEmpty()) {
            Cursor warningsObject = root.setObject(deploymentMetricsWarningsField);
            // One numeric entry per warning, keyed by the warning enum name
            metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count));
        }
    }

    /** Writes cluster info keyed by cluster id under a "clusterInfo" sub-object. */
    private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) {
        Cursor root = object.setObject(clusterInfoField);
        for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) {
            toSlime(entry.getValue(), root.setObject(entry.getKey().value()));
        }
    }

    /** Writes one cluster's flavor, cost, resources, type and hostnames. */
    private void toSlime(ClusterInfo info, Cursor object) {
        object.setString(clusterInfoFlavorField, info.getFlavor());
        object.setLong(clusterInfoCostField, info.getFlavorCost());
        object.setDouble(clusterInfoCpuField, info.getFlavorCPU());
        object.setDouble(clusterInfoMemField, info.getFlavorMem());
        object.setDouble(clusterInfoDiskField, info.getFlavorDisk());
        object.setString(clusterInfoTypeField, info.getClusterType().name());
        Cursor array = object.setArray(clusterInfoHostnamesField);
        for (String host : info.getHostnames()) {
            array.addString(host);
        }
    }

    /** Writes cluster utilization keyed by cluster id under a "clusterUtils" sub-object. */
    private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) {
        Cursor root = object.setObject(clusterUtilsField);
        for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) {
            toSlime(entry.getValue(), root.setObject(entry.getKey().value()));
        }
    }

    /** Writes one cluster's cpu/mem/disk/diskbusy utilization numbers. */
    private void toSlime(ClusterUtilization utils, Cursor object) {
        object.setDouble(clusterUtilsCpuField, utils.getCpu());
        object.setDouble(clusterUtilsMemField, utils.getMemory());
        object.setDouble(clusterUtilsDiskField, utils.getDisk());
        object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy());
    }

    /** Writes a zone as its environment and region values. */
    private void zoneIdToSlime(ZoneId zone, Cursor object) {
        object.setString(environmentField, zone.environment().value());
        object.setString(regionField, zone.region().value());
    }

    /**
     * Writes an application version. Nothing is written unless both build number and
     * source revision are present — an "unknown" version serializes to an empty object.
     */
    private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
        if (applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent()) {
            object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong());
            toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField));
            applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email));
            applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString()));
            applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli()));
        }
    }

    /** Writes a source revision's repository, branch and commit. */
    private void toSlime(SourceRevision sourceRevision, Cursor object) {
        object.setString(repositoryField, sourceRevision.repository());
        object.setString(branchField, sourceRevision.branch());
        object.setString(commitField, sourceRevision.commit());
    }

    /** Writes deployment jobs: project id, per-job status, issue id and build origin flag. */
    private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) {
        deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId));
        jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField));
        deploymentJobs.issueId().ifPresent(jiraIssueId -> cursor.setString(issueIdField, jiraIssueId.value()));
        cursor.setBool(builtInternallyField, deploymentJobs.deployedInternally());
    }

    /** Writes each job status as one object in the given array. */
    private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) {
        for (JobStatus jobStatus : jobStatuses)
            toSlime(jobStatus, jobStatusArray.addObject());
    }

    /** Writes one job's type, optional error, and the four tracked runs plus pause deadline. */
    private void toSlime(JobStatus jobStatus, Cursor object) {
        object.setString(jobTypeField, jobStatus.type().jobName());
        if (jobStatus.jobError().isPresent())
            object.setString(errorField, jobStatus.jobError().get().name());
        jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField));
        jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField));
        jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField));
        jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, object, firstFailingField));
        jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until));
    }

    /** Writes one job run under the named sub-object: id, versions, reason and timestamp. */
    private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) {
        Cursor object = parent.setObject(jobRunObjectName);
        object.setLong(jobRunIdField, jobRun.id());
        object.setString(versionField, jobRun.platform().toString());
        toSlime(jobRun.application(), object.setObject(revisionField));
        jobRun.sourcePlatform().ifPresent(version -> object.setString(sourceVersionField, version.toString()));
        jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField)));
        object.setString(reasonField, jobRun.reason());
        object.setLong(atField, jobRun.at().toEpochMilli());
    }

    /** Writes a change (platform and/or application version, plus pin); an empty change writes nothing. */
    private void toSlime(Change deploying, Cursor parentObject, String fieldName) {
        if (deploying.isEmpty()) return;
        Cursor object = parentObject.setObject(fieldName);
        if (deploying.platform().isPresent())
            object.setString(versionField, deploying.platform().get().toString());
        if (deploying.application().isPresent())
            toSlime(deploying.application().get(), object);
        if (deploying.isPinned())
            object.setBool(pinnedField, true);
    }

    /** Writes rotation status as an array of {hostname, status} objects. */
    private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) {
        rotationStatus.forEach((hostname, status) -> {
            Cursor object = array.addObject();
            object.setString("hostname", hostname.value());
            object.setString("status", status.name());
        });
    }

    // ------------ Deserialization ------------

    /**
     * Reconstructs an Application from its Slime representation.
     * Must accept everything the corresponding toSlime writes, and tolerate
     * absent optional fields from older stored versions.
     */
    public Application fromSlime(Slime slime) {
        Inspector root = slime.get();
        ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString());
        Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong());
        DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false);
        ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString());
        List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField));
        DeploymentJobs deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField));
        Change deploying = changeFromSlime(root.field(deployingField));
        Change outstandingChange = changeFromSlime(root.field(outstandingChangeField));
        Optional<IssueId> ownershipIssueId = optionalString(root.field(ownershipIssueIdField)).map(IssueId::from);
        Optional<User> owner = optionalString(root.field(ownerField)).map(User::from);
        OptionalInt majorVersion = optionalInteger(root.field(majorVersionField));
        ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(),
                                                            root.field(writeQualityField).asDouble());
        Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField));
        Optional<RotationId> legacyRotation = optionalString(root.field(deprecatedRotationField)).map(RotationId::new);
        List<RotationId> rotations = rotationsFromSlime(root);
        Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField));
        return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments,
                               deploymentJobs, deploying, outstandingChange, ownershipIssueId, owner,
                               majorVersion, metrics, pemDeployKey, legacyRotation, rotations, rotationStatus);
    }

    /** Reads the deployments array into a list. */
    private List<Deployment> deploymentsFromSlime(Inspector array) {
        List<Deployment> deployments = new ArrayList<>();
        array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item)));
        return deployments;
    }

    /** Reads a single deployment object. */
    private Deployment deploymentFromSlime(Inspector deploymentObject) {
        return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)),
                              applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)),
                              Version.fromString(deploymentObject.field(versionField).asString()),
                              Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()),
                              clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)),
                              clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)),
                              deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)),
                              DeploymentActivity.create(optionalInstant(deploymentObject.field(lastQueriedField)),
                                                        optionalInstant(deploymentObject.field(lastWrittenField)),
                                                        optionalDouble(deploymentObject.field(lastQueriesPerSecondField)),
                                                        optionalDouble(deploymentObject.field(lastWritesPerSecondField))));
    }

    /** Reads the metrics sub-object; the update time is optional. */
    private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) {
        Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ?
                Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) :
                Optional.empty();
        return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(),
                                     object.field(deploymentMetricsWPSField).asDouble(),
                                     object.field(deploymentMetricsDocsField).asDouble(),
                                     object.field(deploymentMetricsQueryLatencyField).asDouble(),
                                     object.field(deploymentMetricsWriteLatencyField).asDouble(),
                                     instant,
                                     deploymentWarningsFrom(object.field(deploymentMetricsWarningsField)));
    }

    /** Reads the warnings object into an unmodifiable enum-keyed count map. */
    private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) {
        Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
        object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name),
                                                                        (int) value.asLong()));
        return Collections.unmodifiableMap(warnings);
    }

    /** Reads rotation status; missing field yields an empty map. */
    private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) {
        if (!object.valid()) {
            return Collections.emptyMap();
        }
        Map<HostName, RotationStatus> rotationStatus = new TreeMap<>();
        object.traverse((ArrayTraverser) (idx, inspect) -> {
            HostName hostname = HostName.from(inspect.field("hostname").asString());
            RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString());
            rotationStatus.put(hostname, status);
        });
        return Collections.unmodifiableMap(rotationStatus);
    }

    /** Reads the per-cluster info map, keyed by cluster id. */
    private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) {
        Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>();
        object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value)));
        return map;
    }

    /** Reads the per-cluster utilization map, keyed by cluster id. */
    private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) {
        Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>();
        object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value)));
        return map;
    }

    /** Reads one cluster's utilization. Note the ClusterUtilization constructor order: (mem, cpu, disk, diskBusy). */
    private ClusterUtilization clusterUtililzationFromSlime(Inspector object) {
        double cpu = object.field(clusterUtilsCpuField).asDouble();
        double mem = object.field(clusterUtilsMemField).asDouble();
        double disk = object.field(clusterUtilsDiskField).asDouble();
        double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble();
        return new ClusterUtilization(mem, cpu, disk, diskBusy);
    }

    /** Reads one cluster's info object. */
    private ClusterInfo clusterInfoFromSlime(Inspector inspector) {
        String flavor = inspector.field(clusterInfoFlavorField).asString();
        int cost = (int)inspector.field(clusterInfoCostField).asLong();
        String type = inspector.field(clusterInfoTypeField).asString();
        double flavorCpu = inspector.field(clusterInfoCpuField).asDouble();
        double flavorMem = inspector.field(clusterInfoMemField).asDouble();
        double flavorDisk = inspector.field(clusterInfoDiskField).asDouble();
        List<String> hostnames = new ArrayList<>();
        inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString()));
        return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames);
    }

    /** Reads a zone from its environment and region fields. */
    private ZoneId zoneIdFromSlime(Inspector object) {
        return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString());
    }

    /**
     * Reads an application version. Returns ApplicationVersion.unknown when the object is
     * missing or lacks a build number or source revision; otherwise picks the richest
     * factory the present optional fields allow.
     */
    private ApplicationVersion applicationVersionFromSlime(Inspector object) {
        if ( ! object.valid()) return ApplicationVersion.unknown;
        OptionalLong applicationBuildNumber = optionalLong(object.field(applicationBuildNumberField));
        Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField));
        if ( ! sourceRevision.isPresent() || ! applicationBuildNumber.isPresent()) {
            return ApplicationVersion.unknown;
        }
        Optional<String> authorEmail = optionalString(object.field(authorEmailField));
        Optional<Version> compileVersion = optionalString(object.field(compileVersionField)).map(Version::fromString);
        Optional<Instant> buildTime = optionalInstant(object.field(buildTimeField));
        if ( ! authorEmail.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong());
        if ( ! compileVersion.isPresent() || ! buildTime.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get());
        return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(), compileVersion.get(), buildTime.get());
    }

    /** Reads a source revision; missing object yields empty. */
    private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        return Optional.of(new SourceRevision(object.field(repositoryField).asString(),
                                              object.field(branchField).asString(),
                                              object.field(commitField).asString()));
    }

    /** Reads the deployment jobs object. */
    private DeploymentJobs deploymentJobsFromSlime(Inspector object) {
        OptionalLong projectId = optionalLong(object.field(projectIdField));
        List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField));
        Optional<IssueId> issueId = optionalString(object.field(issueIdField)).map(IssueId::from);
        boolean builtInternally = object.field(builtInternallyField).asBool();
        return new DeploymentJobs(projectId, jobStatusList, issueId, builtInternally);
    }

    /** Reads a change; a missing object means no change. */
    private Change changeFromSlime(Inspector object) {
        if ( ! object.valid()) return Change.empty();
        Inspector versionFieldValue = object.field(versionField);
        Change change = Change.empty();
        if (versionFieldValue.valid())
            change = Change.of(Version.fromString(versionFieldValue.asString()));
        if (object.field(applicationBuildNumberField).valid())
            change = change.with(applicationVersionFromSlime(object));
        if (object.field(pinnedField).asBool())
            change = change.withPin();
        return change;
    }

    /** Reads the job status array, silently skipping entries whose job type is no longer known. */
    private List<JobStatus> jobStatusListFromSlime(Inspector array) {
        List<JobStatus> jobStatusList = new ArrayList<>();
        array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add));
        return jobStatusList;
    }

    /** Reads one job status; returns empty if the stored job type name is unrecognized. */
    private Optional<JobStatus> jobStatusFromSlime(Inspector object) {
        Optional<JobType> jobType = JobType.fromOptionalJobName(object.field(jobTypeField).asString());
        if (! jobType.isPresent()) return Optional.empty();
        Optional<JobError> jobError = Optional.empty();
        if (object.field(errorField).valid())
            jobError = Optional.of(JobError.valueOf(object.field(errorField).asString()));
        return Optional.of(new JobStatus(jobType.get(), jobError,
                                         jobRunFromSlime(object.field(lastTriggeredField)),
                                         jobRunFromSlime(object.field(lastCompletedField)),
                                         jobRunFromSlime(object.field(firstFailingField)),
                                         jobRunFromSlime(object.field(lastSuccessField)),
                                         optionalLong(object.field(pausedUntilField))));
    }

    /** Reads one job run; missing object yields empty. */
    private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(),
                                                new Version(object.field(versionField).asString()),
                                                applicationVersionFromSlime(object.field(revisionField)),
                                                optionalString(object.field(sourceVersionField)).map(Version::fromString),
                                                Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime),
                                                object.field(reasonField).asString(),
                                                Instant.ofEpochMilli(object.field(atField).asLong())));
    }

    /** Reads all rotations, merging in the legacy single rotation if it is not already listed. */
    private List<RotationId> rotationsFromSlime(Inspector root) {
        final var rotations = rotationListFromSlime(root.field(rotationsField));
        final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField));
        if (legacyRotation.isPresent() && ! rotations.contains(legacyRotation.get())) {
            rotations.add(legacyRotation.get());
        }
        return rotations;
    }

    /** Reads the rotation id array into a mutable list. */
    private List<RotationId> rotationListFromSlime(Inspector field) {
        final var rotations = new ArrayList<RotationId>();
        field.traverse((ArrayTraverser) (idx, inspector) -> {
            final var rotation = new RotationId(inspector.asString());
            rotations.add(rotation);
        });
        return rotations;
    }

    /** Reads the legacy single rotation field, if present. */
    private Optional<RotationId> legacyRotationFromSlime(Inspector field) {
        return field.valid() ? optionalString(field).map(RotationId::new) : Optional.empty();
    }

    // ------------ Optional-field helpers: empty when the Slime field is absent ------------

    private OptionalLong optionalLong(Inspector field) {
        return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty();
    }

    private OptionalInt optionalInteger(Inspector field) {
        return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty();
    }

    private OptionalDouble optionalDouble(Inspector field) {
        return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty();
    }

    private Optional<String> optionalString(Inspector field) {
        return SlimeUtils.optionalString(field);
    }

    private Optional<Instant> optionalInstant(Inspector field) {
        OptionalLong value = optionalLong(field);
        return value.isPresent() ? Optional.of(Instant.ofEpochMilli(value.getAsLong())) : Optional.empty();
    }
}
class ApplicationSerializer { private final String idField = "id"; private final String createdAtField = "createdAt"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String pinnedField = "pinned"; private final String outstandingChangeField = "outstandingChangeField"; private final String ownershipIssueIdField = "ownershipIssueId"; private final String ownerField = "confirmedOwner"; private final String majorVersionField = "majorVersion"; private final String writeQualityField = "writeQuality"; private final String queryQualityField = "queryQuality"; private final String pemDeployKeyField = "pemDeployKey"; private final String rotationsField = "endpoints"; private final String deprecatedRotationField = "rotation"; private final String rotationStatusField = "rotationStatus"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationBuildNumberField = "applicationBuildNumber"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String authorEmailField = "authorEmailField"; private final String compileVersionField = "compileVersion"; private final String buildTimeField = "buildTime"; private final String lastQueriedField = "lastQueried"; private final String lastWrittenField = "lastWritten"; private final String lastQueriesPerSecondField = "lastQueriesPerSecond"; private 
final String lastWritesPerSecondField = "lastWritesPerSecond"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String issueIdField = "jiraIssueId"; private final String builtInternallyField = "builtInternally"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String pausedUntilField = "pausedUntil"; private final String jobRunIdField = "id"; private final String versionField = "version"; private final String revisionField = "revision"; private final String sourceVersionField = "sourceVersion"; private final String sourceApplicationField = "sourceRevision"; private final String reasonField = "reason"; private final String atField = "at"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField = "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = "diskbusy"; private final String deploymentMetricsField = "metrics"; private final String deploymentMetricsQPSField = "queriesPerSecond"; private final String deploymentMetricsWPSField = "writesPerSecond"; private final String deploymentMetricsDocsField = "documentCount"; private 
final String deploymentMetricsQueryLatencyField = "queryLatencyMillis"; private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis"; private final String deploymentMetricsUpdateTime = "lastUpdated"; private final String deploymentMetricsWarningsField = "warnings"; private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, Cursor object) { zoneIdToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); deploymentMetricsToSlime(deployment.metrics(), object); deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value)); } private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) { Cursor root = object.setObject(deploymentMetricsField); root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond()); root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond()); root.setDouble(deploymentMetricsDocsField, metrics.documentCount()); root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis()); root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant 
-> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli())); if (!metrics.warnings().isEmpty()) { Cursor warningsObject = root.setObject(deploymentMetricsWarningsField); metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count)); } } private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneIdToSlime(ZoneId zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if 
(applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent()) { object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong()); toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField)); applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString())); applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli())); } } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.issueId().ifPresent(jiraIssueId -> cursor.setString(issueIdField, jiraIssueId.value())); cursor.setBool(builtInternallyField, deploymentJobs.deployedInternally()); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().jobName()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField)); jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField)); jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField)); jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, 
object, firstFailingField)); jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until)); } private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) { Cursor object = parent.setObject(jobRunObjectName); object.setLong(jobRunIdField, jobRun.id()); object.setString(versionField, jobRun.platform().toString()); toSlime(jobRun.application(), object.setObject(revisionField)); jobRun.sourcePlatform().ifPresent(version -> object.setString(sourceVersionField, version.toString())); jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField))); object.setString(reasonField, jobRun.reason()); object.setLong(atField, jobRun.at().toEpochMilli()); } private void toSlime(Change deploying, Cursor parentObject, String fieldName) { if (deploying.isEmpty()) return; Cursor object = parentObject.setObject(fieldName); if (deploying.platform().isPresent()) object.setString(versionField, deploying.platform().get().toString()); if (deploying.application().isPresent()) toSlime(deploying.application().get(), object); if (deploying.isPinned()) object.setBool(pinnedField, true); } private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) { rotationStatus.forEach((hostname, status) -> { Cursor object = array.addObject(); object.setString("hostname", hostname.value()); object.setString("status", status.name()); }); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false); ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs 
deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField)); Change deploying = changeFromSlime(root.field(deployingField)); Change outstandingChange = changeFromSlime(root.field(outstandingChangeField)); Optional<IssueId> ownershipIssueId = optionalString(root.field(ownershipIssueIdField)).map(IssueId::from); Optional<User> owner = optionalString(root.field(ownerField)).map(User::from); OptionalInt majorVersion = optionalInteger(root.field(majorVersionField)); ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(), root.field(writeQualityField).asDouble()); Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField)); Optional<RotationId> legacyRotation = optionalString(root.field(deprecatedRotationField)).map(RotationId::new); List<RotationId> rotations = rotationsFromSlime(root); Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField)); return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, legacyRotation, rotations, rotationStatus); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)), applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)), 
deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)), DeploymentActivity.create(optionalInstant(deploymentObject.field(lastQueriedField)), optionalInstant(deploymentObject.field(lastWrittenField)), optionalDouble(deploymentObject.field(lastQueriesPerSecondField)), optionalDouble(deploymentObject.field(lastWritesPerSecondField)))); } private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) { Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ? Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) : Optional.empty(); return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(), object.field(deploymentMetricsWPSField).asDouble(), object.field(deploymentMetricsDocsField).asDouble(), object.field(deploymentMetricsQueryLatencyField).asDouble(), object.field(deploymentMetricsWriteLatencyField).asDouble(), instant, deploymentWarningsFrom(object.field(deploymentMetricsWarningsField))); } private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) { Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name), (int) value.asLong())); return Collections.unmodifiableMap(warnings); } private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) { if (!object.valid()) { return Collections.emptyMap(); } Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); object.traverse((ArrayTraverser) (idx, inspect) -> { HostName hostname = HostName.from(inspect.field("hostname").asString()); RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString()); rotationStatus.put(hostname, status); }); return Collections.unmodifiableMap(rotationStatus); } private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); 
object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private ClusterInfo clusterInfoFromSlime(Inspector inspector) { String flavor = inspector.field(clusterInfoFlavorField).asString(); int cost = (int)inspector.field(clusterInfoCostField).asLong(); String type = inspector.field(clusterInfoTypeField).asString(); double flavorCpu = inspector.field(clusterInfoCpuField).asDouble(); double flavorMem = inspector.field(clusterInfoMemField).asDouble(); double flavorDisk = inspector.field(clusterInfoDiskField).asDouble(); List<String> hostnames = new ArrayList<>(); inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString())); return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames); } private ZoneId zoneIdFromSlime(Inspector object) { return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString()); } private ApplicationVersion applicationVersionFromSlime(Inspector object) { if ( ! 
object.valid()) return ApplicationVersion.unknown; OptionalLong applicationBuildNumber = optionalLong(object.field(applicationBuildNumberField)); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); if ( ! sourceRevision.isPresent() || ! applicationBuildNumber.isPresent()) { return ApplicationVersion.unknown; } Optional<String> authorEmail = optionalString(object.field(authorEmailField)); Optional<Version> compileVersion = optionalString(object.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = optionalInstant(object.field(buildTimeField)); if ( ! authorEmail.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong()); if ( ! compileVersion.isPresent() || ! buildTime.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get()); return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(), compileVersion.get(), buildTime.get()); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { OptionalLong projectId = optionalLong(object.field(projectIdField)); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<IssueId> issueId = optionalString(object.field(issueIdField)).map(IssueId::from); boolean builtInternally = object.field(builtInternallyField).asBool(); return new DeploymentJobs(projectId, jobStatusList, issueId, builtInternally); } private Change changeFromSlime(Inspector object) { if ( ! 
object.valid()) return Change.empty(); Inspector versionFieldValue = object.field(versionField); Change change = Change.empty(); if (versionFieldValue.valid()) change = Change.of(Version.fromString(versionFieldValue.asString())); if (object.field(applicationBuildNumberField).valid()) change = change.with(applicationVersionFromSlime(object)); if (object.field(pinnedField).asBool()) change = change.withPin(); return change; } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add)); return jobStatusList; } private Optional<JobStatus> jobStatusFromSlime(Inspector object) { Optional<JobType> jobType = JobType.fromOptionalJobName(object.field(jobTypeField).asString()); if (! jobType.isPresent()) return Optional.empty(); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return Optional.of(new JobStatus(jobType.get(), jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField)), optionalLong(object.field(pausedUntilField)))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(), new Version(object.field(versionField).asString()), applicationVersionFromSlime(object.field(revisionField)), optionalString(object.field(sourceVersionField)).map(Version::fromString), Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime), object.field(reasonField).asString(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private List<RotationId> rotationsFromSlime(Inspector root) { final var rotations = rotationListFromSlime(root.field(rotationsField)); final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField)); if (legacyRotation.isPresent() && ! rotations.contains(legacyRotation.get())) { rotations.add(legacyRotation.get()); } return rotations; } private List<RotationId> rotationListFromSlime(Inspector field) { final var rotations = new ArrayList<RotationId>(); field.traverse((ArrayTraverser) (idx, inspector) -> { final var rotation = new RotationId(inspector.asString()); rotations.add(rotation); }); return rotations; } private Optional<RotationId> legacyRotationFromSlime(Inspector field) { return field.valid() ? optionalString(field).map(RotationId::new) : Optional.empty(); } private OptionalLong optionalLong(Inspector field) { return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty(); } private OptionalInt optionalInteger(Inspector field) { return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty(); } private OptionalDouble optionalDouble(Inspector field) { return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } private Optional<Instant> optionalInstant(Inspector field) { OptionalLong value = optionalLong(field); return value.isPresent() ? 
Optional.of(Instant.ofEpochMilli(value.getAsLong())) : Optional.empty(); } }
Yup. More important to have the issue fixed :) Removing it with the legacy (de)serialisation should be OK once this is deployed.
public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setLong(createdAtField, application.createdAt().toEpochMilli()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.change(), root, deployingField); toSlime(application.outstandingChange(), root, outstandingChangeField); application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value())); application.owner().ifPresent(owner -> root.setString(ownerField, owner.username())); application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion)); root.setDouble(queryQualityField, application.metrics().queryServiceQuality()); root.setDouble(writeQualityField, application.metrics().writeServiceQuality()); application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey)); application.legacyRotation().ifPresent(rotation -> root.setString(deprecatedRotationField, rotation.asString())); Cursor rotations = root.setArray(rotationsField); application.rotations().forEach(rotation -> rotations.addString(rotation.asString())); toSlime(application.rotationStatus(), root.setArray(rotationStatusField)); return slime; }
application.legacyRotation().ifPresent(rotation -> root.setString(deprecatedRotationField, rotation.asString()));
public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setLong(createdAtField, application.createdAt().toEpochMilli()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.change(), root, deployingField); toSlime(application.outstandingChange(), root, outstandingChangeField); application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value())); application.owner().ifPresent(owner -> root.setString(ownerField, owner.username())); application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion)); root.setDouble(queryQualityField, application.metrics().queryServiceQuality()); root.setDouble(writeQualityField, application.metrics().writeServiceQuality()); application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey)); application.legacyRotation().ifPresent(rotation -> root.setString(deprecatedRotationField, rotation.asString())); Cursor rotations = root.setArray(rotationsField); application.rotations().forEach(rotation -> rotations.addString(rotation.asString())); toSlime(application.rotationStatus(), root.setArray(rotationStatusField)); return slime; }
class ApplicationSerializer { private final String idField = "id"; private final String createdAtField = "createdAt"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String pinnedField = "pinned"; private final String outstandingChangeField = "outstandingChangeField"; private final String ownershipIssueIdField = "ownershipIssueId"; private final String ownerField = "confirmedOwner"; private final String majorVersionField = "majorVersion"; private final String writeQualityField = "writeQuality"; private final String queryQualityField = "queryQuality"; private final String pemDeployKeyField = "pemDeployKey"; private final String rotationsField = "endpoints"; private final String deprecatedRotationField = "rotation"; private final String rotationStatusField = "rotationStatus"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationBuildNumberField = "applicationBuildNumber"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String authorEmailField = "authorEmailField"; private final String compileVersionField = "compileVersion"; private final String buildTimeField = "buildTime"; private final String lastQueriedField = "lastQueried"; private final String lastWrittenField = "lastWritten"; private final String lastQueriesPerSecondField = "lastQueriesPerSecond"; private 
final String lastWritesPerSecondField = "lastWritesPerSecond"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String issueIdField = "jiraIssueId"; private final String builtInternallyField = "builtInternally"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String pausedUntilField = "pausedUntil"; private final String jobRunIdField = "id"; private final String versionField = "version"; private final String revisionField = "revision"; private final String sourceVersionField = "sourceVersion"; private final String sourceApplicationField = "sourceRevision"; private final String reasonField = "reason"; private final String atField = "at"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField = "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = "diskbusy"; private final String deploymentMetricsField = "metrics"; private final String deploymentMetricsQPSField = "queriesPerSecond"; private final String deploymentMetricsWPSField = "writesPerSecond"; private final String deploymentMetricsDocsField = "documentCount"; private 
final String deploymentMetricsQueryLatencyField = "queryLatencyMillis"; private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis"; private final String deploymentMetricsUpdateTime = "lastUpdated"; private final String deploymentMetricsWarningsField = "warnings"; private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, Cursor object) { zoneIdToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); deploymentMetricsToSlime(deployment.metrics(), object); deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value)); } private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) { Cursor root = object.setObject(deploymentMetricsField); root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond()); root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond()); root.setDouble(deploymentMetricsDocsField, metrics.documentCount()); root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis()); root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant 
-> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli())); if (!metrics.warnings().isEmpty()) { Cursor warningsObject = root.setObject(deploymentMetricsWarningsField); metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count)); } } private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneIdToSlime(ZoneId zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if 
(applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent()) { object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong()); toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField)); applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString())); applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli())); } } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.issueId().ifPresent(jiraIssueId -> cursor.setString(issueIdField, jiraIssueId.value())); cursor.setBool(builtInternallyField, deploymentJobs.deployedInternally()); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().jobName()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField)); jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField)); jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField)); jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, 
object, firstFailingField)); jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until)); } private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) { Cursor object = parent.setObject(jobRunObjectName); object.setLong(jobRunIdField, jobRun.id()); object.setString(versionField, jobRun.platform().toString()); toSlime(jobRun.application(), object.setObject(revisionField)); jobRun.sourcePlatform().ifPresent(version -> object.setString(sourceVersionField, version.toString())); jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField))); object.setString(reasonField, jobRun.reason()); object.setLong(atField, jobRun.at().toEpochMilli()); } private void toSlime(Change deploying, Cursor parentObject, String fieldName) { if (deploying.isEmpty()) return; Cursor object = parentObject.setObject(fieldName); if (deploying.platform().isPresent()) object.setString(versionField, deploying.platform().get().toString()); if (deploying.application().isPresent()) toSlime(deploying.application().get(), object); if (deploying.isPinned()) object.setBool(pinnedField, true); } private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) { rotationStatus.forEach((hostname, status) -> { Cursor object = array.addObject(); object.setString("hostname", hostname.value()); object.setString("status", status.name()); }); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false); ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs 
deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField)); Change deploying = changeFromSlime(root.field(deployingField)); Change outstandingChange = changeFromSlime(root.field(outstandingChangeField)); Optional<IssueId> ownershipIssueId = optionalString(root.field(ownershipIssueIdField)).map(IssueId::from); Optional<User> owner = optionalString(root.field(ownerField)).map(User::from); OptionalInt majorVersion = optionalInteger(root.field(majorVersionField)); ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(), root.field(writeQualityField).asDouble()); Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField)); Optional<RotationId> legacyRotation = optionalString(root.field(deprecatedRotationField)).map(RotationId::new); List<RotationId> rotations = rotationsFromSlime(root); Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField)); return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, legacyRotation, rotations, rotationStatus); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)), applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)), 
deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)), DeploymentActivity.create(optionalInstant(deploymentObject.field(lastQueriedField)), optionalInstant(deploymentObject.field(lastWrittenField)), optionalDouble(deploymentObject.field(lastQueriesPerSecondField)), optionalDouble(deploymentObject.field(lastWritesPerSecondField)))); } private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) { Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ? Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) : Optional.empty(); return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(), object.field(deploymentMetricsWPSField).asDouble(), object.field(deploymentMetricsDocsField).asDouble(), object.field(deploymentMetricsQueryLatencyField).asDouble(), object.field(deploymentMetricsWriteLatencyField).asDouble(), instant, deploymentWarningsFrom(object.field(deploymentMetricsWarningsField))); } private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) { Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name), (int) value.asLong())); return Collections.unmodifiableMap(warnings); } private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) { if (!object.valid()) { return Collections.emptyMap(); } Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); object.traverse((ArrayTraverser) (idx, inspect) -> { HostName hostname = HostName.from(inspect.field("hostname").asString()); RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString()); rotationStatus.put(hostname, status); }); return Collections.unmodifiableMap(rotationStatus); } private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); 
object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private ClusterInfo clusterInfoFromSlime(Inspector inspector) { String flavor = inspector.field(clusterInfoFlavorField).asString(); int cost = (int)inspector.field(clusterInfoCostField).asLong(); String type = inspector.field(clusterInfoTypeField).asString(); double flavorCpu = inspector.field(clusterInfoCpuField).asDouble(); double flavorMem = inspector.field(clusterInfoMemField).asDouble(); double flavorDisk = inspector.field(clusterInfoDiskField).asDouble(); List<String> hostnames = new ArrayList<>(); inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString())); return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames); } private ZoneId zoneIdFromSlime(Inspector object) { return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString()); } private ApplicationVersion applicationVersionFromSlime(Inspector object) { if ( ! 
object.valid()) return ApplicationVersion.unknown; OptionalLong applicationBuildNumber = optionalLong(object.field(applicationBuildNumberField)); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); if ( ! sourceRevision.isPresent() || ! applicationBuildNumber.isPresent()) { return ApplicationVersion.unknown; } Optional<String> authorEmail = optionalString(object.field(authorEmailField)); Optional<Version> compileVersion = optionalString(object.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = optionalInstant(object.field(buildTimeField)); if ( ! authorEmail.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong()); if ( ! compileVersion.isPresent() || ! buildTime.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get()); return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(), compileVersion.get(), buildTime.get()); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { OptionalLong projectId = optionalLong(object.field(projectIdField)); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<IssueId> issueId = optionalString(object.field(issueIdField)).map(IssueId::from); boolean builtInternally = object.field(builtInternallyField).asBool(); return new DeploymentJobs(projectId, jobStatusList, issueId, builtInternally); } private Change changeFromSlime(Inspector object) { if ( ! 
object.valid()) return Change.empty(); Inspector versionFieldValue = object.field(versionField); Change change = Change.empty(); if (versionFieldValue.valid()) change = Change.of(Version.fromString(versionFieldValue.asString())); if (object.field(applicationBuildNumberField).valid()) change = change.with(applicationVersionFromSlime(object)); if (object.field(pinnedField).asBool()) change = change.withPin(); return change; } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add)); return jobStatusList; } private Optional<JobStatus> jobStatusFromSlime(Inspector object) { Optional<JobType> jobType = JobType.fromOptionalJobName(object.field(jobTypeField).asString()); if (! jobType.isPresent()) return Optional.empty(); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return Optional.of(new JobStatus(jobType.get(), jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField)), optionalLong(object.field(pausedUntilField)))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(), new Version(object.field(versionField).asString()), applicationVersionFromSlime(object.field(revisionField)), optionalString(object.field(sourceVersionField)).map(Version::fromString), Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime), object.field(reasonField).asString(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private List<RotationId> rotationsFromSlime(Inspector root) { final var rotations = rotationListFromSlime(root.field(rotationsField)); final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField)); if (legacyRotation.isPresent() && ! rotations.contains(legacyRotation.get())) { rotations.add(legacyRotation.get()); } return rotations; } private List<RotationId> rotationListFromSlime(Inspector field) { final var rotations = new ArrayList<RotationId>(); field.traverse((ArrayTraverser) (idx, inspector) -> { final var rotation = new RotationId(inspector.asString()); rotations.add(rotation); }); return rotations; } private Optional<RotationId> legacyRotationFromSlime(Inspector field) { return field.valid() ? optionalString(field).map(RotationId::new) : Optional.empty(); } private OptionalLong optionalLong(Inspector field) { return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty(); } private OptionalInt optionalInteger(Inspector field) { return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty(); } private OptionalDouble optionalDouble(Inspector field) { return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } private Optional<Instant> optionalInstant(Inspector field) { OptionalLong value = optionalLong(field); return value.isPresent() ? 
Optional.of(Instant.ofEpochMilli(value.getAsLong())) : Optional.empty(); } }
class ApplicationSerializer { private final String idField = "id"; private final String createdAtField = "createdAt"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String pinnedField = "pinned"; private final String outstandingChangeField = "outstandingChangeField"; private final String ownershipIssueIdField = "ownershipIssueId"; private final String ownerField = "confirmedOwner"; private final String majorVersionField = "majorVersion"; private final String writeQualityField = "writeQuality"; private final String queryQualityField = "queryQuality"; private final String pemDeployKeyField = "pemDeployKey"; private final String rotationsField = "endpoints"; private final String deprecatedRotationField = "rotation"; private final String rotationStatusField = "rotationStatus"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationBuildNumberField = "applicationBuildNumber"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String authorEmailField = "authorEmailField"; private final String compileVersionField = "compileVersion"; private final String buildTimeField = "buildTime"; private final String lastQueriedField = "lastQueried"; private final String lastWrittenField = "lastWritten"; private final String lastQueriesPerSecondField = "lastQueriesPerSecond"; private 
final String lastWritesPerSecondField = "lastWritesPerSecond"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String issueIdField = "jiraIssueId"; private final String builtInternallyField = "builtInternally"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String pausedUntilField = "pausedUntil"; private final String jobRunIdField = "id"; private final String versionField = "version"; private final String revisionField = "revision"; private final String sourceVersionField = "sourceVersion"; private final String sourceApplicationField = "sourceRevision"; private final String reasonField = "reason"; private final String atField = "at"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField = "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = "diskbusy"; private final String deploymentMetricsField = "metrics"; private final String deploymentMetricsQPSField = "queriesPerSecond"; private final String deploymentMetricsWPSField = "writesPerSecond"; private final String deploymentMetricsDocsField = "documentCount"; private 
final String deploymentMetricsQueryLatencyField = "queryLatencyMillis"; private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis"; private final String deploymentMetricsUpdateTime = "lastUpdated"; private final String deploymentMetricsWarningsField = "warnings"; private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, Cursor object) { zoneIdToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); deploymentMetricsToSlime(deployment.metrics(), object); deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value)); } private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) { Cursor root = object.setObject(deploymentMetricsField); root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond()); root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond()); root.setDouble(deploymentMetricsDocsField, metrics.documentCount()); root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis()); root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant 
-> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli())); if (!metrics.warnings().isEmpty()) { Cursor warningsObject = root.setObject(deploymentMetricsWarningsField); metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count)); } } private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneIdToSlime(ZoneId zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if 
(applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent()) { object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong()); toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField)); applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString())); applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli())); } } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.issueId().ifPresent(jiraIssueId -> cursor.setString(issueIdField, jiraIssueId.value())); cursor.setBool(builtInternallyField, deploymentJobs.deployedInternally()); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().jobName()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField)); jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField)); jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField)); jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, 
object, firstFailingField)); jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until)); } private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) { Cursor object = parent.setObject(jobRunObjectName); object.setLong(jobRunIdField, jobRun.id()); object.setString(versionField, jobRun.platform().toString()); toSlime(jobRun.application(), object.setObject(revisionField)); jobRun.sourcePlatform().ifPresent(version -> object.setString(sourceVersionField, version.toString())); jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField))); object.setString(reasonField, jobRun.reason()); object.setLong(atField, jobRun.at().toEpochMilli()); } private void toSlime(Change deploying, Cursor parentObject, String fieldName) { if (deploying.isEmpty()) return; Cursor object = parentObject.setObject(fieldName); if (deploying.platform().isPresent()) object.setString(versionField, deploying.platform().get().toString()); if (deploying.application().isPresent()) toSlime(deploying.application().get(), object); if (deploying.isPinned()) object.setBool(pinnedField, true); } private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) { rotationStatus.forEach((hostname, status) -> { Cursor object = array.addObject(); object.setString("hostname", hostname.value()); object.setString("status", status.name()); }); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false); ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs 
deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField)); Change deploying = changeFromSlime(root.field(deployingField)); Change outstandingChange = changeFromSlime(root.field(outstandingChangeField)); Optional<IssueId> ownershipIssueId = optionalString(root.field(ownershipIssueIdField)).map(IssueId::from); Optional<User> owner = optionalString(root.field(ownerField)).map(User::from); OptionalInt majorVersion = optionalInteger(root.field(majorVersionField)); ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(), root.field(writeQualityField).asDouble()); Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField)); Optional<RotationId> legacyRotation = optionalString(root.field(deprecatedRotationField)).map(RotationId::new); List<RotationId> rotations = rotationsFromSlime(root); Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField)); return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, legacyRotation, rotations, rotationStatus); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)), applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)), 
deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)), DeploymentActivity.create(optionalInstant(deploymentObject.field(lastQueriedField)), optionalInstant(deploymentObject.field(lastWrittenField)), optionalDouble(deploymentObject.field(lastQueriesPerSecondField)), optionalDouble(deploymentObject.field(lastWritesPerSecondField)))); } private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) { Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ? Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) : Optional.empty(); return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(), object.field(deploymentMetricsWPSField).asDouble(), object.field(deploymentMetricsDocsField).asDouble(), object.field(deploymentMetricsQueryLatencyField).asDouble(), object.field(deploymentMetricsWriteLatencyField).asDouble(), instant, deploymentWarningsFrom(object.field(deploymentMetricsWarningsField))); } private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) { Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name), (int) value.asLong())); return Collections.unmodifiableMap(warnings); } private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) { if (!object.valid()) { return Collections.emptyMap(); } Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); object.traverse((ArrayTraverser) (idx, inspect) -> { HostName hostname = HostName.from(inspect.field("hostname").asString()); RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString()); rotationStatus.put(hostname, status); }); return Collections.unmodifiableMap(rotationStatus); } private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); 
object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private ClusterInfo clusterInfoFromSlime(Inspector inspector) { String flavor = inspector.field(clusterInfoFlavorField).asString(); int cost = (int)inspector.field(clusterInfoCostField).asLong(); String type = inspector.field(clusterInfoTypeField).asString(); double flavorCpu = inspector.field(clusterInfoCpuField).asDouble(); double flavorMem = inspector.field(clusterInfoMemField).asDouble(); double flavorDisk = inspector.field(clusterInfoDiskField).asDouble(); List<String> hostnames = new ArrayList<>(); inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString())); return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames); } private ZoneId zoneIdFromSlime(Inspector object) { return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString()); } private ApplicationVersion applicationVersionFromSlime(Inspector object) { if ( ! 
object.valid()) return ApplicationVersion.unknown; OptionalLong applicationBuildNumber = optionalLong(object.field(applicationBuildNumberField)); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); if ( ! sourceRevision.isPresent() || ! applicationBuildNumber.isPresent()) { return ApplicationVersion.unknown; } Optional<String> authorEmail = optionalString(object.field(authorEmailField)); Optional<Version> compileVersion = optionalString(object.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = optionalInstant(object.field(buildTimeField)); if ( ! authorEmail.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong()); if ( ! compileVersion.isPresent() || ! buildTime.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get()); return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(), compileVersion.get(), buildTime.get()); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { OptionalLong projectId = optionalLong(object.field(projectIdField)); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<IssueId> issueId = optionalString(object.field(issueIdField)).map(IssueId::from); boolean builtInternally = object.field(builtInternallyField).asBool(); return new DeploymentJobs(projectId, jobStatusList, issueId, builtInternally); } private Change changeFromSlime(Inspector object) { if ( ! 
object.valid()) return Change.empty(); Inspector versionFieldValue = object.field(versionField); Change change = Change.empty(); if (versionFieldValue.valid()) change = Change.of(Version.fromString(versionFieldValue.asString())); if (object.field(applicationBuildNumberField).valid()) change = change.with(applicationVersionFromSlime(object)); if (object.field(pinnedField).asBool()) change = change.withPin(); return change; } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add)); return jobStatusList; } private Optional<JobStatus> jobStatusFromSlime(Inspector object) { Optional<JobType> jobType = JobType.fromOptionalJobName(object.field(jobTypeField).asString()); if (! jobType.isPresent()) return Optional.empty(); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return Optional.of(new JobStatus(jobType.get(), jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField)), optionalLong(object.field(pausedUntilField)))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(), new Version(object.field(versionField).asString()), applicationVersionFromSlime(object.field(revisionField)), optionalString(object.field(sourceVersionField)).map(Version::fromString), Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime), object.field(reasonField).asString(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private List<RotationId> rotationsFromSlime(Inspector root) { final var rotations = rotationListFromSlime(root.field(rotationsField)); final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField)); if (legacyRotation.isPresent() && ! rotations.contains(legacyRotation.get())) { rotations.add(legacyRotation.get()); } return rotations; } private List<RotationId> rotationListFromSlime(Inspector field) { final var rotations = new ArrayList<RotationId>(); field.traverse((ArrayTraverser) (idx, inspector) -> { final var rotation = new RotationId(inspector.asString()); rotations.add(rotation); }); return rotations; } private Optional<RotationId> legacyRotationFromSlime(Inspector field) { return field.valid() ? optionalString(field).map(RotationId::new) : Optional.empty(); } private OptionalLong optionalLong(Inspector field) { return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty(); } private OptionalInt optionalInteger(Inspector field) { return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty(); } private OptionalDouble optionalDouble(Inspector field) { return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } private Optional<Instant> optionalInstant(Inspector field) { OptionalLong value = optionalLong(field); return value.isPresent() ? 
Optional.of(Instant.ofEpochMilli(value.getAsLong())) : Optional.empty(); } }
I agree, but I didn't feel brave enough to make assumptions about order or anything like that :)
public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setLong(createdAtField, application.createdAt().toEpochMilli()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.change(), root, deployingField); toSlime(application.outstandingChange(), root, outstandingChangeField); application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value())); application.owner().ifPresent(owner -> root.setString(ownerField, owner.username())); application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion)); root.setDouble(queryQualityField, application.metrics().queryServiceQuality()); root.setDouble(writeQualityField, application.metrics().writeServiceQuality()); application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey)); application.legacyRotation().ifPresent(rotation -> root.setString(deprecatedRotationField, rotation.asString())); Cursor rotations = root.setArray(rotationsField); application.rotations().forEach(rotation -> rotations.addString(rotation.asString())); toSlime(application.rotationStatus(), root.setArray(rotationStatusField)); return slime; }
application.legacyRotation().ifPresent(rotation -> root.setString(deprecatedRotationField, rotation.asString()));
public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setLong(createdAtField, application.createdAt().toEpochMilli()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.change(), root, deployingField); toSlime(application.outstandingChange(), root, outstandingChangeField); application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value())); application.owner().ifPresent(owner -> root.setString(ownerField, owner.username())); application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion)); root.setDouble(queryQualityField, application.metrics().queryServiceQuality()); root.setDouble(writeQualityField, application.metrics().writeServiceQuality()); application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey)); application.legacyRotation().ifPresent(rotation -> root.setString(deprecatedRotationField, rotation.asString())); Cursor rotations = root.setArray(rotationsField); application.rotations().forEach(rotation -> rotations.addString(rotation.asString())); toSlime(application.rotationStatus(), root.setArray(rotationStatusField)); return slime; }
class ApplicationSerializer { private final String idField = "id"; private final String createdAtField = "createdAt"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String pinnedField = "pinned"; private final String outstandingChangeField = "outstandingChangeField"; private final String ownershipIssueIdField = "ownershipIssueId"; private final String ownerField = "confirmedOwner"; private final String majorVersionField = "majorVersion"; private final String writeQualityField = "writeQuality"; private final String queryQualityField = "queryQuality"; private final String pemDeployKeyField = "pemDeployKey"; private final String rotationsField = "endpoints"; private final String deprecatedRotationField = "rotation"; private final String rotationStatusField = "rotationStatus"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationBuildNumberField = "applicationBuildNumber"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String authorEmailField = "authorEmailField"; private final String compileVersionField = "compileVersion"; private final String buildTimeField = "buildTime"; private final String lastQueriedField = "lastQueried"; private final String lastWrittenField = "lastWritten"; private final String lastQueriesPerSecondField = "lastQueriesPerSecond"; private 
final String lastWritesPerSecondField = "lastWritesPerSecond"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String issueIdField = "jiraIssueId"; private final String builtInternallyField = "builtInternally"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String pausedUntilField = "pausedUntil"; private final String jobRunIdField = "id"; private final String versionField = "version"; private final String revisionField = "revision"; private final String sourceVersionField = "sourceVersion"; private final String sourceApplicationField = "sourceRevision"; private final String reasonField = "reason"; private final String atField = "at"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField = "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = "diskbusy"; private final String deploymentMetricsField = "metrics"; private final String deploymentMetricsQPSField = "queriesPerSecond"; private final String deploymentMetricsWPSField = "writesPerSecond"; private final String deploymentMetricsDocsField = "documentCount"; private 
final String deploymentMetricsQueryLatencyField = "queryLatencyMillis"; private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis"; private final String deploymentMetricsUpdateTime = "lastUpdated"; private final String deploymentMetricsWarningsField = "warnings"; private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, Cursor object) { zoneIdToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); deploymentMetricsToSlime(deployment.metrics(), object); deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value)); } private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) { Cursor root = object.setObject(deploymentMetricsField); root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond()); root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond()); root.setDouble(deploymentMetricsDocsField, metrics.documentCount()); root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis()); root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant 
-> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli())); if (!metrics.warnings().isEmpty()) { Cursor warningsObject = root.setObject(deploymentMetricsWarningsField); metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count)); } } private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneIdToSlime(ZoneId zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if 
(applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent()) { object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong()); toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField)); applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString())); applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli())); } } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.issueId().ifPresent(jiraIssueId -> cursor.setString(issueIdField, jiraIssueId.value())); cursor.setBool(builtInternallyField, deploymentJobs.deployedInternally()); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().jobName()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField)); jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField)); jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField)); jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, 
object, firstFailingField)); jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until)); } private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) { Cursor object = parent.setObject(jobRunObjectName); object.setLong(jobRunIdField, jobRun.id()); object.setString(versionField, jobRun.platform().toString()); toSlime(jobRun.application(), object.setObject(revisionField)); jobRun.sourcePlatform().ifPresent(version -> object.setString(sourceVersionField, version.toString())); jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField))); object.setString(reasonField, jobRun.reason()); object.setLong(atField, jobRun.at().toEpochMilli()); } private void toSlime(Change deploying, Cursor parentObject, String fieldName) { if (deploying.isEmpty()) return; Cursor object = parentObject.setObject(fieldName); if (deploying.platform().isPresent()) object.setString(versionField, deploying.platform().get().toString()); if (deploying.application().isPresent()) toSlime(deploying.application().get(), object); if (deploying.isPinned()) object.setBool(pinnedField, true); } private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) { rotationStatus.forEach((hostname, status) -> { Cursor object = array.addObject(); object.setString("hostname", hostname.value()); object.setString("status", status.name()); }); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false); ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs 
deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField)); Change deploying = changeFromSlime(root.field(deployingField)); Change outstandingChange = changeFromSlime(root.field(outstandingChangeField)); Optional<IssueId> ownershipIssueId = optionalString(root.field(ownershipIssueIdField)).map(IssueId::from); Optional<User> owner = optionalString(root.field(ownerField)).map(User::from); OptionalInt majorVersion = optionalInteger(root.field(majorVersionField)); ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(), root.field(writeQualityField).asDouble()); Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField)); Optional<RotationId> legacyRotation = optionalString(root.field(deprecatedRotationField)).map(RotationId::new); List<RotationId> rotations = rotationsFromSlime(root); Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField)); return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, legacyRotation, rotations, rotationStatus); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)), applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)), 
deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)), DeploymentActivity.create(optionalInstant(deploymentObject.field(lastQueriedField)), optionalInstant(deploymentObject.field(lastWrittenField)), optionalDouble(deploymentObject.field(lastQueriesPerSecondField)), optionalDouble(deploymentObject.field(lastWritesPerSecondField)))); } private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) { Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ? Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) : Optional.empty(); return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(), object.field(deploymentMetricsWPSField).asDouble(), object.field(deploymentMetricsDocsField).asDouble(), object.field(deploymentMetricsQueryLatencyField).asDouble(), object.field(deploymentMetricsWriteLatencyField).asDouble(), instant, deploymentWarningsFrom(object.field(deploymentMetricsWarningsField))); } private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) { Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name), (int) value.asLong())); return Collections.unmodifiableMap(warnings); } private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) { if (!object.valid()) { return Collections.emptyMap(); } Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); object.traverse((ArrayTraverser) (idx, inspect) -> { HostName hostname = HostName.from(inspect.field("hostname").asString()); RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString()); rotationStatus.put(hostname, status); }); return Collections.unmodifiableMap(rotationStatus); } private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); 
object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private ClusterInfo clusterInfoFromSlime(Inspector inspector) { String flavor = inspector.field(clusterInfoFlavorField).asString(); int cost = (int)inspector.field(clusterInfoCostField).asLong(); String type = inspector.field(clusterInfoTypeField).asString(); double flavorCpu = inspector.field(clusterInfoCpuField).asDouble(); double flavorMem = inspector.field(clusterInfoMemField).asDouble(); double flavorDisk = inspector.field(clusterInfoDiskField).asDouble(); List<String> hostnames = new ArrayList<>(); inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString())); return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames); } private ZoneId zoneIdFromSlime(Inspector object) { return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString()); } private ApplicationVersion applicationVersionFromSlime(Inspector object) { if ( ! 
object.valid()) return ApplicationVersion.unknown; OptionalLong applicationBuildNumber = optionalLong(object.field(applicationBuildNumberField)); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); if ( ! sourceRevision.isPresent() || ! applicationBuildNumber.isPresent()) { return ApplicationVersion.unknown; } Optional<String> authorEmail = optionalString(object.field(authorEmailField)); Optional<Version> compileVersion = optionalString(object.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = optionalInstant(object.field(buildTimeField)); if ( ! authorEmail.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong()); if ( ! compileVersion.isPresent() || ! buildTime.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get()); return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(), compileVersion.get(), buildTime.get()); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { OptionalLong projectId = optionalLong(object.field(projectIdField)); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<IssueId> issueId = optionalString(object.field(issueIdField)).map(IssueId::from); boolean builtInternally = object.field(builtInternallyField).asBool(); return new DeploymentJobs(projectId, jobStatusList, issueId, builtInternally); } private Change changeFromSlime(Inspector object) { if ( ! 
object.valid()) return Change.empty(); Inspector versionFieldValue = object.field(versionField); Change change = Change.empty(); if (versionFieldValue.valid()) change = Change.of(Version.fromString(versionFieldValue.asString())); if (object.field(applicationBuildNumberField).valid()) change = change.with(applicationVersionFromSlime(object)); if (object.field(pinnedField).asBool()) change = change.withPin(); return change; } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add)); return jobStatusList; } private Optional<JobStatus> jobStatusFromSlime(Inspector object) { Optional<JobType> jobType = JobType.fromOptionalJobName(object.field(jobTypeField).asString()); if (! jobType.isPresent()) return Optional.empty(); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return Optional.of(new JobStatus(jobType.get(), jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField)), optionalLong(object.field(pausedUntilField)))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(), new Version(object.field(versionField).asString()), applicationVersionFromSlime(object.field(revisionField)), optionalString(object.field(sourceVersionField)).map(Version::fromString), Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime), object.field(reasonField).asString(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private List<RotationId> rotationsFromSlime(Inspector root) { final var rotations = rotationListFromSlime(root.field(rotationsField)); final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField)); if (legacyRotation.isPresent() && ! rotations.contains(legacyRotation.get())) { rotations.add(legacyRotation.get()); } return rotations; } private List<RotationId> rotationListFromSlime(Inspector field) { final var rotations = new ArrayList<RotationId>(); field.traverse((ArrayTraverser) (idx, inspector) -> { final var rotation = new RotationId(inspector.asString()); rotations.add(rotation); }); return rotations; } private Optional<RotationId> legacyRotationFromSlime(Inspector field) { return field.valid() ? optionalString(field).map(RotationId::new) : Optional.empty(); } private OptionalLong optionalLong(Inspector field) { return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty(); } private OptionalInt optionalInteger(Inspector field) { return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty(); } private OptionalDouble optionalDouble(Inspector field) { return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } private Optional<Instant> optionalInstant(Inspector field) { OptionalLong value = optionalLong(field); return value.isPresent() ? 
Optional.of(Instant.ofEpochMilli(value.getAsLong())) : Optional.empty(); } }
class ApplicationSerializer { private final String idField = "id"; private final String createdAtField = "createdAt"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String pinnedField = "pinned"; private final String outstandingChangeField = "outstandingChangeField"; private final String ownershipIssueIdField = "ownershipIssueId"; private final String ownerField = "confirmedOwner"; private final String majorVersionField = "majorVersion"; private final String writeQualityField = "writeQuality"; private final String queryQualityField = "queryQuality"; private final String pemDeployKeyField = "pemDeployKey"; private final String rotationsField = "endpoints"; private final String deprecatedRotationField = "rotation"; private final String rotationStatusField = "rotationStatus"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationBuildNumberField = "applicationBuildNumber"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String authorEmailField = "authorEmailField"; private final String compileVersionField = "compileVersion"; private final String buildTimeField = "buildTime"; private final String lastQueriedField = "lastQueried"; private final String lastWrittenField = "lastWritten"; private final String lastQueriesPerSecondField = "lastQueriesPerSecond"; private 
final String lastWritesPerSecondField = "lastWritesPerSecond"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String issueIdField = "jiraIssueId"; private final String builtInternallyField = "builtInternally"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String pausedUntilField = "pausedUntil"; private final String jobRunIdField = "id"; private final String versionField = "version"; private final String revisionField = "revision"; private final String sourceVersionField = "sourceVersion"; private final String sourceApplicationField = "sourceRevision"; private final String reasonField = "reason"; private final String atField = "at"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField = "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = "diskbusy"; private final String deploymentMetricsField = "metrics"; private final String deploymentMetricsQPSField = "queriesPerSecond"; private final String deploymentMetricsWPSField = "writesPerSecond"; private final String deploymentMetricsDocsField = "documentCount"; private 
final String deploymentMetricsQueryLatencyField = "queryLatencyMillis"; private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis"; private final String deploymentMetricsUpdateTime = "lastUpdated"; private final String deploymentMetricsWarningsField = "warnings"; private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, Cursor object) { zoneIdToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); deploymentMetricsToSlime(deployment.metrics(), object); deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value)); } private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) { Cursor root = object.setObject(deploymentMetricsField); root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond()); root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond()); root.setDouble(deploymentMetricsDocsField, metrics.documentCount()); root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis()); root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant 
-> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli())); if (!metrics.warnings().isEmpty()) { Cursor warningsObject = root.setObject(deploymentMetricsWarningsField); metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count)); } } private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneIdToSlime(ZoneId zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if 
(applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent()) { object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong()); toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField)); applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString())); applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli())); } } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.issueId().ifPresent(jiraIssueId -> cursor.setString(issueIdField, jiraIssueId.value())); cursor.setBool(builtInternallyField, deploymentJobs.deployedInternally()); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().jobName()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField)); jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField)); jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField)); jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, 
object, firstFailingField)); jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until)); } private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) { Cursor object = parent.setObject(jobRunObjectName); object.setLong(jobRunIdField, jobRun.id()); object.setString(versionField, jobRun.platform().toString()); toSlime(jobRun.application(), object.setObject(revisionField)); jobRun.sourcePlatform().ifPresent(version -> object.setString(sourceVersionField, version.toString())); jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField))); object.setString(reasonField, jobRun.reason()); object.setLong(atField, jobRun.at().toEpochMilli()); } private void toSlime(Change deploying, Cursor parentObject, String fieldName) { if (deploying.isEmpty()) return; Cursor object = parentObject.setObject(fieldName); if (deploying.platform().isPresent()) object.setString(versionField, deploying.platform().get().toString()); if (deploying.application().isPresent()) toSlime(deploying.application().get(), object); if (deploying.isPinned()) object.setBool(pinnedField, true); } private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) { rotationStatus.forEach((hostname, status) -> { Cursor object = array.addObject(); object.setString("hostname", hostname.value()); object.setString("status", status.name()); }); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false); ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs 
deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField)); Change deploying = changeFromSlime(root.field(deployingField)); Change outstandingChange = changeFromSlime(root.field(outstandingChangeField)); Optional<IssueId> ownershipIssueId = optionalString(root.field(ownershipIssueIdField)).map(IssueId::from); Optional<User> owner = optionalString(root.field(ownerField)).map(User::from); OptionalInt majorVersion = optionalInteger(root.field(majorVersionField)); ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(), root.field(writeQualityField).asDouble()); Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField)); Optional<RotationId> legacyRotation = optionalString(root.field(deprecatedRotationField)).map(RotationId::new); List<RotationId> rotations = rotationsFromSlime(root); Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField)); return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, legacyRotation, rotations, rotationStatus); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)), applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)), 
deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)), DeploymentActivity.create(optionalInstant(deploymentObject.field(lastQueriedField)), optionalInstant(deploymentObject.field(lastWrittenField)), optionalDouble(deploymentObject.field(lastQueriesPerSecondField)), optionalDouble(deploymentObject.field(lastWritesPerSecondField)))); } private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) { Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ? Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) : Optional.empty(); return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(), object.field(deploymentMetricsWPSField).asDouble(), object.field(deploymentMetricsDocsField).asDouble(), object.field(deploymentMetricsQueryLatencyField).asDouble(), object.field(deploymentMetricsWriteLatencyField).asDouble(), instant, deploymentWarningsFrom(object.field(deploymentMetricsWarningsField))); } private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) { Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name), (int) value.asLong())); return Collections.unmodifiableMap(warnings); } private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) { if (!object.valid()) { return Collections.emptyMap(); } Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); object.traverse((ArrayTraverser) (idx, inspect) -> { HostName hostname = HostName.from(inspect.field("hostname").asString()); RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString()); rotationStatus.put(hostname, status); }); return Collections.unmodifiableMap(rotationStatus); } private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); 
object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private ClusterInfo clusterInfoFromSlime(Inspector inspector) { String flavor = inspector.field(clusterInfoFlavorField).asString(); int cost = (int)inspector.field(clusterInfoCostField).asLong(); String type = inspector.field(clusterInfoTypeField).asString(); double flavorCpu = inspector.field(clusterInfoCpuField).asDouble(); double flavorMem = inspector.field(clusterInfoMemField).asDouble(); double flavorDisk = inspector.field(clusterInfoDiskField).asDouble(); List<String> hostnames = new ArrayList<>(); inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString())); return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames); } private ZoneId zoneIdFromSlime(Inspector object) { return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString()); } private ApplicationVersion applicationVersionFromSlime(Inspector object) { if ( ! 
object.valid()) return ApplicationVersion.unknown; OptionalLong applicationBuildNumber = optionalLong(object.field(applicationBuildNumberField)); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); if ( ! sourceRevision.isPresent() || ! applicationBuildNumber.isPresent()) { return ApplicationVersion.unknown; } Optional<String> authorEmail = optionalString(object.field(authorEmailField)); Optional<Version> compileVersion = optionalString(object.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = optionalInstant(object.field(buildTimeField)); if ( ! authorEmail.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong()); if ( ! compileVersion.isPresent() || ! buildTime.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get()); return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(), compileVersion.get(), buildTime.get()); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { OptionalLong projectId = optionalLong(object.field(projectIdField)); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<IssueId> issueId = optionalString(object.field(issueIdField)).map(IssueId::from); boolean builtInternally = object.field(builtInternallyField).asBool(); return new DeploymentJobs(projectId, jobStatusList, issueId, builtInternally); } private Change changeFromSlime(Inspector object) { if ( ! 
object.valid()) return Change.empty(); Inspector versionFieldValue = object.field(versionField); Change change = Change.empty(); if (versionFieldValue.valid()) change = Change.of(Version.fromString(versionFieldValue.asString())); if (object.field(applicationBuildNumberField).valid()) change = change.with(applicationVersionFromSlime(object)); if (object.field(pinnedField).asBool()) change = change.withPin(); return change; } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add)); return jobStatusList; } private Optional<JobStatus> jobStatusFromSlime(Inspector object) { Optional<JobType> jobType = JobType.fromOptionalJobName(object.field(jobTypeField).asString()); if (! jobType.isPresent()) return Optional.empty(); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return Optional.of(new JobStatus(jobType.get(), jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField)), optionalLong(object.field(pausedUntilField)))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(), new Version(object.field(versionField).asString()), applicationVersionFromSlime(object.field(revisionField)), optionalString(object.field(sourceVersionField)).map(Version::fromString), Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime), object.field(reasonField).asString(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private List<RotationId> rotationsFromSlime(Inspector root) { final var rotations = rotationListFromSlime(root.field(rotationsField)); final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField)); if (legacyRotation.isPresent() && ! rotations.contains(legacyRotation.get())) { rotations.add(legacyRotation.get()); } return rotations; } private List<RotationId> rotationListFromSlime(Inspector field) { final var rotations = new ArrayList<RotationId>(); field.traverse((ArrayTraverser) (idx, inspector) -> { final var rotation = new RotationId(inspector.asString()); rotations.add(rotation); }); return rotations; } private Optional<RotationId> legacyRotationFromSlime(Inspector field) { return field.valid() ? optionalString(field).map(RotationId::new) : Optional.empty(); } private OptionalLong optionalLong(Inspector field) { return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty(); } private OptionalInt optionalInteger(Inspector field) { return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty(); } private OptionalDouble optionalDouble(Inspector field) { return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } private Optional<Instant> optionalInstant(Inspector field) { OptionalLong value = optionalLong(field); return value.isPresent() ? 
Optional.of(Instant.ofEpochMilli(value.getAsLong())) : Optional.empty(); } }
I doubt this `IOException` will ever be thrown.
private static String wrap(String message) { try { return new ObjectMapper().writeValueAsString(Map.of("error", message)); } catch (IOException e) { log.log(WARNING, "Could not encode error message to json:", e); return "Could not encode error message to json, check the log for details."; } }
log.log(WARNING, "Could not encode error message to json:", e);
private static String wrap(String message) { try { return new ObjectMapper().writeValueAsString(Map.of("error", message)); } catch (JsonProcessingException e) { log.log(WARNING, "Could not encode error message to json:", e); return "Could not encode error message to json, check the log for details."; } }
class JsonRenderingException extends RuntimeException { private static Logger log = Logger.getLogger(JsonRenderingException.class.getName()); JsonRenderingException(String message) { super(message); } public String getMessageAsJson() { return wrap(getMessage()); } }
class JsonRenderingException extends RuntimeException { private static Logger log = Logger.getLogger(JsonRenderingException.class.getName()); JsonRenderingException(String message) { super(message); } /** * Returns the message wrapped in an "error" json object. In the unlikely case that json rendering of the * error message fails, a plain text string will be returned instead. */ public String getMessageAsJson() { return wrap(getMessage()); } }
Consider assigning a name to the thread (for easier investigation of stack dumps).
public FilesArchived(File rootDir) { this.root = rootDir; rescan(); Thread thread = new Thread(this::run); thread.setDaemon(true); thread.start(); }
Thread thread = new Thread(this::run);
public FilesArchived(File rootDir) { this.root = rootDir; rescan(); Thread thread = new Thread(this::run); thread.setDaemon(true); thread.setName("FilesArchived-maintainer"); thread.start(); }
class FilesArchived { private static final Logger log = Logger.getLogger(FilesArchived.class.getName()); /** * File instance representing root directory of archive */ private final File root; private List<LogFile> knownFiles; public final static long compressAfterMillis = 2L * 3600 * 1000; private long maxAgeDays = 30; private long sizeLimit = 30L * (1L << 30); private void run() { try { Thread.sleep(125000); while (true) { while (maintenance()) { Thread.sleep(2000); } Thread.sleep(299000); } } catch (InterruptedException e) { } } /** * Creates an instance of FilesArchive managing the given directory */ public String toString() { return FilesArchived.class.getName() + ": root=" + root; } public synchronized int highestGen(String prefix) { int gen = 0; for (LogFile lf : knownFiles) { if (prefix.equals(lf.prefix)) { gen = Math.max(gen, lf.generation); } } return gen; } public synchronized boolean maintenance() { boolean action = false; rescan(); if (removeOlderThan(maxAgeDays)) { action = true; rescan(); } if (compressOldFiles()) { action = true; rescan(); } long days = maxAgeDays; while (tooMuchDiskUsage() && (--days > 1)) { if (removeOlderThan(days)) { action = true; rescan(); } } return action; } private void rescan() { knownFiles = scanDir(root); } boolean tooMuchDiskUsage() { long sz = sumFileSizes(); return sz > sizeLimit; } private boolean olderThan(LogFile lf, long days, long now) { long mtime = lf.path.lastModified(); long diff = now - mtime; return (diff > days * 86400L * 1000L); } private boolean removeOlderThan(long days) { boolean action = false; long now = System.currentTimeMillis(); for (LogFile lf : knownFiles) { if (olderThan(lf, days, now)) { lf.path.delete(); log.info("Deleted: "+lf.path); action = true; } } return action; } private boolean compressOldFiles() { boolean action = false; long now = System.currentTimeMillis(); int count = 0; for (LogFile lf : knownFiles) { if (lf.canCompress(now) && (count++ < 5)) { compress(lf.path); } } return count 
> 0; } private void compress(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { long mtime = oldFile.lastModified(); byte [] buffer = new byte[0x100000]; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); } compressor.finish(); compressor.flush(); oldFile.delete(); gzippedFile.setLastModified(mtime); log.info("Compressed: "+gzippedFile); } catch (IOException e) { log.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } } long sumFileSizes() { long sum = 0; for (LogFile lf : knownFiles) { sum += lf.path.length(); } return sum; } private static Pattern dateFormatRegexp = Pattern.compile(".*/" + "[0-9][0-9][0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]-" + "[0-9].*"); private static List<LogFile> scanDir(File top) { List<LogFile> retval = new ArrayList<>(); String[] names = top.list(); if (names != null) { for (String name : names) { File sub = new File(top, name); if (sub.isFile()) { String pathName = sub.toString(); if (dateFormatRegexp.matcher(pathName).matches()) { retval.add(new LogFile(sub)); } else { log.warning("skipping file not matching log archive pattern: "+pathName); } } else if (sub.isDirectory()) { for (LogFile subFile : scanDir(sub)) { retval.add(subFile); } } } } return retval; } static class LogFile { public final File path; public final String prefix; public final int generation; public final boolean zsuff; public boolean canCompress(long now) { if (zsuff) return false; if (! 
path.isFile()) return false; long diff = now - path.lastModified(); if (diff < compressAfterMillis) return false; return true; } private static int generationOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return 0; String suff = name.substring(dash + 1); int r = 0; for (char ch : suff.toCharArray()) { if (ch >= '0' && ch <= '9') { r *= 10; r += (ch - '0'); } else { break; } } return r; } private static String prefixOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return name; return name.substring(0, dash); } private static boolean zSuffix(String name) { if (name.endsWith(".gz")) return true; return false; } public LogFile(File path) { String name = path.toString(); this.path = path; this.prefix = prefixOf(name); this.generation = generationOf(name); this.zsuff = zSuffix(name); } public String toString() { return "FilesArchived.LogFile{name="+path+" prefix="+prefix+" gen="+generation+" z="+zsuff+"}"; } } }
class FilesArchived { private static final Logger log = Logger.getLogger(FilesArchived.class.getName()); /** * File instance representing root directory of archive */ private final File root; private List<LogFile> knownFiles; public final static long compressAfterMillis = 2L * 3600 * 1000; private long maxAgeDays = 30; private long sizeLimit = 30L * (1L << 30); private void run() { try { Thread.sleep(125000); while (true) { while (maintenance()) { Thread.sleep(2000); } Thread.sleep(299000); } } catch (Exception e) { System.err.println("Fatal exception in FilesArchived-maintainer thread: "+e); } } /** * Creates an instance of FilesArchive managing the given directory */ public String toString() { return FilesArchived.class.getName() + ": root=" + root; } public synchronized int highestGen(String prefix) { int gen = 0; for (LogFile lf : knownFiles) { if (prefix.equals(lf.prefix)) { gen = Math.max(gen, lf.generation); } } return gen; } public synchronized boolean maintenance() { boolean action = false; rescan(); if (removeOlderThan(maxAgeDays)) { action = true; rescan(); } if (compressOldFiles()) { action = true; rescan(); } long days = maxAgeDays; while (tooMuchDiskUsage() && (--days > 1)) { if (removeOlderThan(days)) { action = true; rescan(); } } return action; } private void rescan() { knownFiles = scanDir(root); } boolean tooMuchDiskUsage() { long sz = sumFileSizes(); return sz > sizeLimit; } private boolean olderThan(LogFile lf, long days, long now) { long mtime = lf.path.lastModified(); long diff = now - mtime; return (diff > days * 86400L * 1000L); } private boolean removeOlderThan(long days) { boolean action = false; long now = System.currentTimeMillis(); for (LogFile lf : knownFiles) { if (olderThan(lf, days, now)) { lf.path.delete(); log.info("Deleted: "+lf.path); action = true; } } return action; } private boolean compressOldFiles() { boolean action = false; long now = System.currentTimeMillis(); int count = 0; for (LogFile lf : knownFiles) { if 
(lf.canCompress(now) && (count++ < 5)) { compress(lf.path); } } return count > 0; } private void compress(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { long mtime = oldFile.lastModified(); byte [] buffer = new byte[0x100000]; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); } compressor.finish(); compressor.flush(); oldFile.delete(); gzippedFile.setLastModified(mtime); log.info("Compressed: "+gzippedFile); } catch (IOException e) { log.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } } long sumFileSizes() { long sum = 0; for (LogFile lf : knownFiles) { sum += lf.path.length(); } return sum; } private static Pattern dateFormatRegexp = Pattern.compile(".*/" + "[0-9][0-9][0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]-" + "[0-9].*"); private static List<LogFile> scanDir(File top) { List<LogFile> retval = new ArrayList<>(); String[] names = top.list(); if (names != null) { for (String name : names) { File sub = new File(top, name); if (sub.isFile()) { String pathName = sub.toString(); if (dateFormatRegexp.matcher(pathName).matches()) { retval.add(new LogFile(sub)); } else { log.warning("skipping file not matching log archive pattern: "+pathName); } } else if (sub.isDirectory()) { for (LogFile subFile : scanDir(sub)) { retval.add(subFile); } } } } return retval; } static class LogFile { public final File path; public final String prefix; public final int generation; public final boolean zsuff; public boolean canCompress(long now) { if (zsuff) return false; if (! 
path.isFile()) return false; long diff = now - path.lastModified(); if (diff < compressAfterMillis) return false; return true; } private static int generationOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return 0; String suff = name.substring(dash + 1); int r = 0; for (char ch : suff.toCharArray()) { if (ch >= '0' && ch <= '9') { r *= 10; r += (ch - '0'); } else { break; } } return r; } private static String prefixOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return name; return name.substring(0, dash); } private static boolean zSuffix(String name) { if (name.endsWith(".gz")) return true; return false; } public LogFile(File path) { String name = path.toString(); this.path = path; this.prefix = prefixOf(name); this.generation = generationOf(name); this.zsuff = zSuffix(name); } public String toString() { return "FilesArchived.LogFile{name="+path+" prefix="+prefix+" gen="+generation+" z="+zsuff+"}"; } } }
The maintenance thread will stop if `maintenance()` throws an unchecked exception.
private void run() { try { Thread.sleep(125000); while (true) { while (maintenance()) { Thread.sleep(2000); } Thread.sleep(299000); } } catch (InterruptedException e) { } }
while (maintenance()) {
private void run() { try { Thread.sleep(125000); while (true) { while (maintenance()) { Thread.sleep(2000); } Thread.sleep(299000); } } catch (Exception e) { System.err.println("Fatal exception in FilesArchived-maintainer thread: "+e); } }
class FilesArchived { private static final Logger log = Logger.getLogger(FilesArchived.class.getName()); /** * File instance representing root directory of archive */ private final File root; private List<LogFile> knownFiles; public final static long compressAfterMillis = 2L * 3600 * 1000; private long maxAgeDays = 30; private long sizeLimit = 30L * (1L << 30); /** * Creates an instance of FilesArchive managing the given directory */ public FilesArchived(File rootDir) { this.root = rootDir; rescan(); Thread thread = new Thread(this::run); thread.setDaemon(true); thread.start(); } public String toString() { return FilesArchived.class.getName() + ": root=" + root; } public synchronized int highestGen(String prefix) { int gen = 0; for (LogFile lf : knownFiles) { if (prefix.equals(lf.prefix)) { gen = Math.max(gen, lf.generation); } } return gen; } public synchronized boolean maintenance() { boolean action = false; rescan(); if (removeOlderThan(maxAgeDays)) { action = true; rescan(); } if (compressOldFiles()) { action = true; rescan(); } long days = maxAgeDays; while (tooMuchDiskUsage() && (--days > 1)) { if (removeOlderThan(days)) { action = true; rescan(); } } return action; } private void rescan() { knownFiles = scanDir(root); } boolean tooMuchDiskUsage() { long sz = sumFileSizes(); return sz > sizeLimit; } private boolean olderThan(LogFile lf, long days, long now) { long mtime = lf.path.lastModified(); long diff = now - mtime; return (diff > days * 86400L * 1000L); } private boolean removeOlderThan(long days) { boolean action = false; long now = System.currentTimeMillis(); for (LogFile lf : knownFiles) { if (olderThan(lf, days, now)) { lf.path.delete(); log.info("Deleted: "+lf.path); action = true; } } return action; } private boolean compressOldFiles() { boolean action = false; long now = System.currentTimeMillis(); int count = 0; for (LogFile lf : knownFiles) { if (lf.canCompress(now) && (count++ < 5)) { compress(lf.path); } } return count > 0; } private void 
compress(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { long mtime = oldFile.lastModified(); byte [] buffer = new byte[0x100000]; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); } compressor.finish(); compressor.flush(); oldFile.delete(); gzippedFile.setLastModified(mtime); log.info("Compressed: "+gzippedFile); } catch (IOException e) { log.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } } long sumFileSizes() { long sum = 0; for (LogFile lf : knownFiles) { sum += lf.path.length(); } return sum; } private static Pattern dateFormatRegexp = Pattern.compile(".*/" + "[0-9][0-9][0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]-" + "[0-9].*"); private static List<LogFile> scanDir(File top) { List<LogFile> retval = new ArrayList<>(); String[] names = top.list(); if (names != null) { for (String name : names) { File sub = new File(top, name); if (sub.isFile()) { String pathName = sub.toString(); if (dateFormatRegexp.matcher(pathName).matches()) { retval.add(new LogFile(sub)); } else { log.warning("skipping file not matching log archive pattern: "+pathName); } } else if (sub.isDirectory()) { for (LogFile subFile : scanDir(sub)) { retval.add(subFile); } } } } return retval; } static class LogFile { public final File path; public final String prefix; public final int generation; public final boolean zsuff; public boolean canCompress(long now) { if (zsuff) return false; if (! 
path.isFile()) return false; long diff = now - path.lastModified(); if (diff < compressAfterMillis) return false; return true; } private static int generationOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return 0; String suff = name.substring(dash + 1); int r = 0; for (char ch : suff.toCharArray()) { if (ch >= '0' && ch <= '9') { r *= 10; r += (ch - '0'); } else { break; } } return r; } private static String prefixOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return name; return name.substring(0, dash); } private static boolean zSuffix(String name) { if (name.endsWith(".gz")) return true; return false; } public LogFile(File path) { String name = path.toString(); this.path = path; this.prefix = prefixOf(name); this.generation = generationOf(name); this.zsuff = zSuffix(name); } public String toString() { return "FilesArchived.LogFile{name="+path+" prefix="+prefix+" gen="+generation+" z="+zsuff+"}"; } } }
class FilesArchived { private static final Logger log = Logger.getLogger(FilesArchived.class.getName()); /** * File instance representing root directory of archive */ private final File root; private List<LogFile> knownFiles; public final static long compressAfterMillis = 2L * 3600 * 1000; private long maxAgeDays = 30; private long sizeLimit = 30L * (1L << 30); /** * Creates an instance of FilesArchive managing the given directory */ public FilesArchived(File rootDir) { this.root = rootDir; rescan(); Thread thread = new Thread(this::run); thread.setDaemon(true); thread.setName("FilesArchived-maintainer"); thread.start(); } public String toString() { return FilesArchived.class.getName() + ": root=" + root; } public synchronized int highestGen(String prefix) { int gen = 0; for (LogFile lf : knownFiles) { if (prefix.equals(lf.prefix)) { gen = Math.max(gen, lf.generation); } } return gen; } public synchronized boolean maintenance() { boolean action = false; rescan(); if (removeOlderThan(maxAgeDays)) { action = true; rescan(); } if (compressOldFiles()) { action = true; rescan(); } long days = maxAgeDays; while (tooMuchDiskUsage() && (--days > 1)) { if (removeOlderThan(days)) { action = true; rescan(); } } return action; } private void rescan() { knownFiles = scanDir(root); } boolean tooMuchDiskUsage() { long sz = sumFileSizes(); return sz > sizeLimit; } private boolean olderThan(LogFile lf, long days, long now) { long mtime = lf.path.lastModified(); long diff = now - mtime; return (diff > days * 86400L * 1000L); } private boolean removeOlderThan(long days) { boolean action = false; long now = System.currentTimeMillis(); for (LogFile lf : knownFiles) { if (olderThan(lf, days, now)) { lf.path.delete(); log.info("Deleted: "+lf.path); action = true; } } return action; } private boolean compressOldFiles() { boolean action = false; long now = System.currentTimeMillis(); int count = 0; for (LogFile lf : knownFiles) { if (lf.canCompress(now) && (count++ < 5)) { 
compress(lf.path); } } return count > 0; } private void compress(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { long mtime = oldFile.lastModified(); byte [] buffer = new byte[0x100000]; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); } compressor.finish(); compressor.flush(); oldFile.delete(); gzippedFile.setLastModified(mtime); log.info("Compressed: "+gzippedFile); } catch (IOException e) { log.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } } long sumFileSizes() { long sum = 0; for (LogFile lf : knownFiles) { sum += lf.path.length(); } return sum; } private static Pattern dateFormatRegexp = Pattern.compile(".*/" + "[0-9][0-9][0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]-" + "[0-9].*"); private static List<LogFile> scanDir(File top) { List<LogFile> retval = new ArrayList<>(); String[] names = top.list(); if (names != null) { for (String name : names) { File sub = new File(top, name); if (sub.isFile()) { String pathName = sub.toString(); if (dateFormatRegexp.matcher(pathName).matches()) { retval.add(new LogFile(sub)); } else { log.warning("skipping file not matching log archive pattern: "+pathName); } } else if (sub.isDirectory()) { for (LogFile subFile : scanDir(sub)) { retval.add(subFile); } } } } return retval; } static class LogFile { public final File path; public final String prefix; public final int generation; public final boolean zsuff; public boolean canCompress(long now) { if (zsuff) return false; if (! 
path.isFile()) return false; long diff = now - path.lastModified(); if (diff < compressAfterMillis) return false; return true; } private static int generationOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return 0; String suff = name.substring(dash + 1); int r = 0; for (char ch : suff.toCharArray()) { if (ch >= '0' && ch <= '9') { r *= 10; r += (ch - '0'); } else { break; } } return r; } private static String prefixOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return name; return name.substring(0, dash); } private static boolean zSuffix(String name) { if (name.endsWith(".gz")) return true; return false; } public LogFile(File path) { String name = path.toString(); this.path = path; this.prefix = prefixOf(name); this.generation = generationOf(name); this.zsuff = zSuffix(name); } public String toString() { return "FilesArchived.LogFile{name="+path+" prefix="+prefix+" gen="+generation+" z="+zsuff+"}"; } } }
yes.
private void run() { try { Thread.sleep(125000); while (true) { while (maintenance()) { Thread.sleep(2000); } Thread.sleep(299000); } } catch (InterruptedException e) { } }
while (maintenance()) {
private void run() { try { Thread.sleep(125000); while (true) { while (maintenance()) { Thread.sleep(2000); } Thread.sleep(299000); } } catch (Exception e) { System.err.println("Fatal exception in FilesArchived-maintainer thread: "+e); } }
class FilesArchived { private static final Logger log = Logger.getLogger(FilesArchived.class.getName()); /** * File instance representing root directory of archive */ private final File root; private List<LogFile> knownFiles; public final static long compressAfterMillis = 2L * 3600 * 1000; private long maxAgeDays = 30; private long sizeLimit = 30L * (1L << 30); /** * Creates an instance of FilesArchive managing the given directory */ public FilesArchived(File rootDir) { this.root = rootDir; rescan(); Thread thread = new Thread(this::run); thread.setDaemon(true); thread.start(); } public String toString() { return FilesArchived.class.getName() + ": root=" + root; } public synchronized int highestGen(String prefix) { int gen = 0; for (LogFile lf : knownFiles) { if (prefix.equals(lf.prefix)) { gen = Math.max(gen, lf.generation); } } return gen; } public synchronized boolean maintenance() { boolean action = false; rescan(); if (removeOlderThan(maxAgeDays)) { action = true; rescan(); } if (compressOldFiles()) { action = true; rescan(); } long days = maxAgeDays; while (tooMuchDiskUsage() && (--days > 1)) { if (removeOlderThan(days)) { action = true; rescan(); } } return action; } private void rescan() { knownFiles = scanDir(root); } boolean tooMuchDiskUsage() { long sz = sumFileSizes(); return sz > sizeLimit; } private boolean olderThan(LogFile lf, long days, long now) { long mtime = lf.path.lastModified(); long diff = now - mtime; return (diff > days * 86400L * 1000L); } private boolean removeOlderThan(long days) { boolean action = false; long now = System.currentTimeMillis(); for (LogFile lf : knownFiles) { if (olderThan(lf, days, now)) { lf.path.delete(); log.info("Deleted: "+lf.path); action = true; } } return action; } private boolean compressOldFiles() { boolean action = false; long now = System.currentTimeMillis(); int count = 0; for (LogFile lf : knownFiles) { if (lf.canCompress(now) && (count++ < 5)) { compress(lf.path); } } return count > 0; } private void 
compress(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { long mtime = oldFile.lastModified(); byte [] buffer = new byte[0x100000]; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); } compressor.finish(); compressor.flush(); oldFile.delete(); gzippedFile.setLastModified(mtime); log.info("Compressed: "+gzippedFile); } catch (IOException e) { log.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } } long sumFileSizes() { long sum = 0; for (LogFile lf : knownFiles) { sum += lf.path.length(); } return sum; } private static Pattern dateFormatRegexp = Pattern.compile(".*/" + "[0-9][0-9][0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]-" + "[0-9].*"); private static List<LogFile> scanDir(File top) { List<LogFile> retval = new ArrayList<>(); String[] names = top.list(); if (names != null) { for (String name : names) { File sub = new File(top, name); if (sub.isFile()) { String pathName = sub.toString(); if (dateFormatRegexp.matcher(pathName).matches()) { retval.add(new LogFile(sub)); } else { log.warning("skipping file not matching log archive pattern: "+pathName); } } else if (sub.isDirectory()) { for (LogFile subFile : scanDir(sub)) { retval.add(subFile); } } } } return retval; } static class LogFile { public final File path; public final String prefix; public final int generation; public final boolean zsuff; public boolean canCompress(long now) { if (zsuff) return false; if (! 
path.isFile()) return false; long diff = now - path.lastModified(); if (diff < compressAfterMillis) return false; return true; } private static int generationOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return 0; String suff = name.substring(dash + 1); int r = 0; for (char ch : suff.toCharArray()) { if (ch >= '0' && ch <= '9') { r *= 10; r += (ch - '0'); } else { break; } } return r; } private static String prefixOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return name; return name.substring(0, dash); } private static boolean zSuffix(String name) { if (name.endsWith(".gz")) return true; return false; } public LogFile(File path) { String name = path.toString(); this.path = path; this.prefix = prefixOf(name); this.generation = generationOf(name); this.zsuff = zSuffix(name); } public String toString() { return "FilesArchived.LogFile{name="+path+" prefix="+prefix+" gen="+generation+" z="+zsuff+"}"; } } }
class FilesArchived { private static final Logger log = Logger.getLogger(FilesArchived.class.getName()); /** * File instance representing root directory of archive */ private final File root; private List<LogFile> knownFiles; public final static long compressAfterMillis = 2L * 3600 * 1000; private long maxAgeDays = 30; private long sizeLimit = 30L * (1L << 30); /** * Creates an instance of FilesArchive managing the given directory */ public FilesArchived(File rootDir) { this.root = rootDir; rescan(); Thread thread = new Thread(this::run); thread.setDaemon(true); thread.setName("FilesArchived-maintainer"); thread.start(); } public String toString() { return FilesArchived.class.getName() + ": root=" + root; } public synchronized int highestGen(String prefix) { int gen = 0; for (LogFile lf : knownFiles) { if (prefix.equals(lf.prefix)) { gen = Math.max(gen, lf.generation); } } return gen; } public synchronized boolean maintenance() { boolean action = false; rescan(); if (removeOlderThan(maxAgeDays)) { action = true; rescan(); } if (compressOldFiles()) { action = true; rescan(); } long days = maxAgeDays; while (tooMuchDiskUsage() && (--days > 1)) { if (removeOlderThan(days)) { action = true; rescan(); } } return action; } private void rescan() { knownFiles = scanDir(root); } boolean tooMuchDiskUsage() { long sz = sumFileSizes(); return sz > sizeLimit; } private boolean olderThan(LogFile lf, long days, long now) { long mtime = lf.path.lastModified(); long diff = now - mtime; return (diff > days * 86400L * 1000L); } private boolean removeOlderThan(long days) { boolean action = false; long now = System.currentTimeMillis(); for (LogFile lf : knownFiles) { if (olderThan(lf, days, now)) { lf.path.delete(); log.info("Deleted: "+lf.path); action = true; } } return action; } private boolean compressOldFiles() { boolean action = false; long now = System.currentTimeMillis(); int count = 0; for (LogFile lf : knownFiles) { if (lf.canCompress(now) && (count++ < 5)) { 
compress(lf.path); } } return count > 0; } private void compress(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { long mtime = oldFile.lastModified(); byte [] buffer = new byte[0x100000]; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); } compressor.finish(); compressor.flush(); oldFile.delete(); gzippedFile.setLastModified(mtime); log.info("Compressed: "+gzippedFile); } catch (IOException e) { log.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } } long sumFileSizes() { long sum = 0; for (LogFile lf : knownFiles) { sum += lf.path.length(); } return sum; } private static Pattern dateFormatRegexp = Pattern.compile(".*/" + "[0-9][0-9][0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]-" + "[0-9].*"); private static List<LogFile> scanDir(File top) { List<LogFile> retval = new ArrayList<>(); String[] names = top.list(); if (names != null) { for (String name : names) { File sub = new File(top, name); if (sub.isFile()) { String pathName = sub.toString(); if (dateFormatRegexp.matcher(pathName).matches()) { retval.add(new LogFile(sub)); } else { log.warning("skipping file not matching log archive pattern: "+pathName); } } else if (sub.isDirectory()) { for (LogFile subFile : scanDir(sub)) { retval.add(subFile); } } } } return retval; } static class LogFile { public final File path; public final String prefix; public final int generation; public final boolean zsuff; public boolean canCompress(long now) { if (zsuff) return false; if (! 
path.isFile()) return false; long diff = now - path.lastModified(); if (diff < compressAfterMillis) return false; return true; } private static int generationOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return 0; String suff = name.substring(dash + 1); int r = 0; for (char ch : suff.toCharArray()) { if (ch >= '0' && ch <= '9') { r *= 10; r += (ch - '0'); } else { break; } } return r; } private static String prefixOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return name; return name.substring(0, dash); } private static boolean zSuffix(String name) { if (name.endsWith(".gz")) return true; return false; } public LogFile(File path) { String name = path.toString(); this.path = path; this.prefix = prefixOf(name); this.generation = generationOf(name); this.zsuff = zSuffix(name); } public String toString() { return "FilesArchived.LogFile{name="+path+" prefix="+prefix+" gen="+generation+" z="+zsuff+"}"; } } }
And these will be inactivated, dirtied, readied, and activated into the tenant-host at the next release w/o the type=host cluster?
public void conflicting_type_allocation_test() { tester.activate(zoneApp, prepareSystemApplication(zoneApp, NodeType.proxy, "routing")); inactiveExpirer.maintain(); tester.nodeRepository().getNodes(NodeType.host) .forEach(node -> tester.nodeRepository().setReady(node.hostname(), Agent.operator, "Readied by host-admin")); tester.activate(tenantHostApp, prepareSystemApplication(tenantHostApp, NodeType.host, "tenant-host")); Set<Node> allNodes = Set.copyOf(tester.nodeRepository().getNodes()); List<HostSpec> proxyHostSpecs = prepareSystemApplication(zoneApp, NodeType.proxy, "routing"); List<HostSpec> nodeAdminHostSpecs = prepareSystemApplication(zoneApp, NodeType.host, "node-admin"); List<HostSpec> zoneAppHostSpecs = concat(proxyHostSpecs, nodeAdminHostSpecs, Collectors.toList()); tester.activate(zoneApp, zoneAppHostSpecs); assertEquals(0, nodeAdminHostSpecs.size()); assertEquals(allNodes, Set.copyOf(tester.nodeRepository().getNodes())); Node newHost = tester.makeReadyNodes(1, "large", NodeType.host).get(0); proxyHostSpecs = prepareSystemApplication(zoneApp, NodeType.proxy, "routing"); nodeAdminHostSpecs = prepareSystemApplication(zoneApp, NodeType.host, "node-admin"); zoneAppHostSpecs = concat(proxyHostSpecs, nodeAdminHostSpecs, Collectors.toList()); tester.activate(zoneApp, zoneAppHostSpecs); assertEquals(1, nodeAdminHostSpecs.size()); newHost = tester.nodeRepository().getNode(newHost.hostname()).orElseThrow(); Set<Node> allNodesWithNewHost = concat(allNodes, Set.of(newHost), Collectors.toSet()); assertEquals(allNodesWithNewHost, Set.copyOf(tester.nodeRepository().getNodes())); assertEquals(zoneApp, newHost.allocation().get().owner()); }
public void conflicting_type_allocation_test() { tester.activate(zoneApp, prepareSystemApplication(zoneApp, NodeType.proxy, "routing")); inactiveExpirer.maintain(); tester.nodeRepository().getNodes(NodeType.host) .forEach(node -> tester.nodeRepository().setReady(node.hostname(), Agent.operator, "Readied by host-admin")); tester.activate(tenantHostApp, prepareSystemApplication(tenantHostApp, NodeType.host, "tenant-host")); Set<Node> allNodes = Set.copyOf(tester.nodeRepository().getNodes()); List<HostSpec> proxyHostSpecs = prepareSystemApplication(zoneApp, NodeType.proxy, "routing"); List<HostSpec> nodeAdminHostSpecs = prepareSystemApplication(zoneApp, NodeType.host, "node-admin"); List<HostSpec> zoneAppHostSpecs = concat(proxyHostSpecs, nodeAdminHostSpecs, Collectors.toList()); tester.activate(zoneApp, zoneAppHostSpecs); assertEquals(0, nodeAdminHostSpecs.size()); assertEquals(allNodes, Set.copyOf(tester.nodeRepository().getNodes())); Node newHost = tester.makeReadyNodes(1, "large", NodeType.host).get(0); proxyHostSpecs = prepareSystemApplication(zoneApp, NodeType.proxy, "routing"); nodeAdminHostSpecs = prepareSystemApplication(zoneApp, NodeType.host, "node-admin"); zoneAppHostSpecs = concat(proxyHostSpecs, nodeAdminHostSpecs, Collectors.toList()); tester.activate(zoneApp, zoneAppHostSpecs); assertEquals(1, nodeAdminHostSpecs.size()); newHost = tester.nodeRepository().getNode(newHost.hostname()).orElseThrow(); Set<Node> allNodesWithNewHost = concat(allNodes, Set.of(newHost), Collectors.toSet()); assertEquals(allNodesWithNewHost, Set.copyOf(tester.nodeRepository().getNodes())); assertEquals(zoneApp, newHost.allocation().get().owner()); }
class ZoneAppMigrationTest { private final ManualClock clock = new ManualClock(); private final ProvisioningTester tester = new ProvisioningTester.Builder().build(); private final InactiveExpirer inactiveExpirer = new InactiveExpirer(tester.nodeRepository(), clock, Duration.ofDays(99)); private final Version version = Version.fromString("7.42.23"); private final ApplicationId zoneApp = ApplicationId.from("hosted-vespa", "routing", "default"); private final ApplicationId proxyHostApp = ApplicationId.from("hosted-vespa", "proxy-host", "default"); private final ApplicationId tenantHostApp = ApplicationId.from("hosted-vespa", "tenant-host", "default"); private final ApplicationId app1 = tester.makeApplicationId(); private final ApplicationId app2 = tester.makeApplicationId(); @Test public void tenant_host_deallocation_test() { assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.active).size()); assertEquals(15, tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active).size()); Set<Node> tenantNodes = Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant)); tester.activate(zoneApp, prepareSystemApplication(zoneApp, NodeType.proxy, "routing")); assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.inactive).size()); assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant))); inactiveExpirer.maintain(); assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.dirty).size()); assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant))); assertEquals(0, tester.nodeRepository().getNodes(NodeType.host).stream().mapToLong(node -> 
node.status().reboot().wanted()).sum()); tester.nodeRepository().getNodes(NodeType.host) .forEach(node -> tester.nodeRepository().setReady(node.hostname(), Agent.operator, "Readied by host-admin")); assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.ready).size()); assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant))); tester.activate(tenantHostApp, prepareSystemApplication(tenantHostApp, NodeType.host, "tenant-host")); assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.active).size()); assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant))); assertEquals(Set.copyOf(tester.nodeRepository().getNodes(NodeType.host)), Set.copyOf(tester.nodeRepository().getNodes(tenantHostApp))); assertEquals(Set.copyOf(tester.nodeRepository().getNodes(NodeType.proxy)), Set.copyOf(tester.nodeRepository().getNodes(zoneApp))); } @Test @Before public void setup() { tester.makeReadyNodes(5, "large", NodeType.proxyhost); tester.makeReadyNodes(5, "large", NodeType.proxy); tester.makeReadyNodes(20, "large", NodeType.host, 3); tester.activate(proxyHostApp, prepareSystemApplication(proxyHostApp, NodeType.proxyhost, "proxy-host")); List<HostSpec> proxyHostSpecs = prepareSystemApplication(zoneApp, NodeType.proxy, "routing"); List<HostSpec> nodeAdminHostSpecs = prepareSystemApplication(zoneApp, NodeType.host, "node-admin"); List<HostSpec> zoneAppHostSpecs = concat(proxyHostSpecs, nodeAdminHostSpecs, Collectors.toList()); tester.activate(zoneApp, zoneAppHostSpecs); activateTenantApplication(app1, 3, 4); activateTenantApplication(app2, 5, 3); } private List<HostSpec> prepareSystemApplication(ApplicationId applicationId, NodeType nodeType, String clusterId) { return tester.prepare(applicationId, 
ClusterSpec.request(container, ClusterSpec.Id.from(clusterId), version, false, Set.of()), Capacity.fromRequiredNodeType(nodeType), 1); } private void activateTenantApplication(ApplicationId app, int numContainerNodes, int numContentNodes) { List<HostSpec> combinedHostSpecs = new ArrayList<>(numContainerNodes + numContentNodes); combinedHostSpecs.addAll(tester.prepare(app, ClusterSpec.request(container, ClusterSpec.Id.from("web"), version, false, Set.of()), Capacity.fromCount(numContainerNodes, new NodeResources(2, 2, 50)), 1)); combinedHostSpecs.addAll(tester.prepare(app, ClusterSpec.request(content, ClusterSpec.Id.from("store"), version, false, Set.of()), Capacity.fromCount(numContentNodes, new NodeResources(1, 4, 50)), 1)); tester.activate(app, combinedHostSpecs); } private <T, R, A> R concat(Collection<T> c1, Collection<T> c2, Collector<? super T, A, R> collector) { return Stream.concat(c1.stream(), c2.stream()) .collect(collector); } }
class ZoneAppMigrationTest { private final ManualClock clock = new ManualClock(); private final ProvisioningTester tester = new ProvisioningTester.Builder().build(); private final InactiveExpirer inactiveExpirer = new InactiveExpirer(tester.nodeRepository(), clock, Duration.ofDays(99)); private final Version version = Version.fromString("7.42.23"); private final ApplicationId zoneApp = ApplicationId.from("hosted-vespa", "routing", "default"); private final ApplicationId proxyHostApp = ApplicationId.from("hosted-vespa", "proxy-host", "default"); private final ApplicationId tenantHostApp = ApplicationId.from("hosted-vespa", "tenant-host", "default"); private final ApplicationId app1 = tester.makeApplicationId(); private final ApplicationId app2 = tester.makeApplicationId(); @Test public void tenant_host_deallocation_test() { assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.active).size()); assertEquals(15, tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active).size()); Set<Node> tenantNodes = Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant)); tester.activate(zoneApp, prepareSystemApplication(zoneApp, NodeType.proxy, "routing")); assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.inactive).size()); assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant))); inactiveExpirer.maintain(); assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.dirty).size()); assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant))); assertEquals(0, tester.nodeRepository().getNodes(NodeType.host).stream().mapToLong(node -> 
node.status().reboot().wanted()).sum()); tester.nodeRepository().getNodes(NodeType.host) .forEach(node -> tester.nodeRepository().setReady(node.hostname(), Agent.operator, "Readied by host-admin")); assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.ready).size()); assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant))); tester.activate(tenantHostApp, prepareSystemApplication(tenantHostApp, NodeType.host, "tenant-host")); assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.active).size()); assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant))); assertEquals(Set.copyOf(tester.nodeRepository().getNodes(NodeType.host)), Set.copyOf(tester.nodeRepository().getNodes(tenantHostApp))); assertEquals(Set.copyOf(tester.nodeRepository().getNodes(NodeType.proxy)), Set.copyOf(tester.nodeRepository().getNodes(zoneApp))); } @Test @Before public void setup() { tester.makeReadyNodes(5, "large", NodeType.proxyhost); tester.makeReadyNodes(5, "large", NodeType.proxy); tester.makeReadyNodes(20, "large", NodeType.host, 3); tester.activate(proxyHostApp, prepareSystemApplication(proxyHostApp, NodeType.proxyhost, "proxy-host")); List<HostSpec> proxyHostSpecs = prepareSystemApplication(zoneApp, NodeType.proxy, "routing"); List<HostSpec> nodeAdminHostSpecs = prepareSystemApplication(zoneApp, NodeType.host, "node-admin"); List<HostSpec> zoneAppHostSpecs = concat(proxyHostSpecs, nodeAdminHostSpecs, Collectors.toList()); tester.activate(zoneApp, zoneAppHostSpecs); activateTenantApplication(app1, 3, 4); activateTenantApplication(app2, 5, 3); } private List<HostSpec> prepareSystemApplication(ApplicationId applicationId, NodeType nodeType, String clusterId) { return tester.prepare(applicationId, 
ClusterSpec.request(container, ClusterSpec.Id.from(clusterId), version, false, Set.of()), Capacity.fromRequiredNodeType(nodeType), 1); } private void activateTenantApplication(ApplicationId app, int numContainerNodes, int numContentNodes) { List<HostSpec> combinedHostSpecs = new ArrayList<>(numContainerNodes + numContentNodes); combinedHostSpecs.addAll(tester.prepare(app, ClusterSpec.request(container, ClusterSpec.Id.from("web"), version, false, Set.of()), Capacity.fromCount(numContainerNodes, new NodeResources(2, 2, 50)), 1)); combinedHostSpecs.addAll(tester.prepare(app, ClusterSpec.request(content, ClusterSpec.Id.from("store"), version, false, Set.of()), Capacity.fromCount(numContentNodes, new NodeResources(1, 4, 50)), 1)); tester.activate(app, combinedHostSpecs); } private <T, R, A> R concat(Collection<T> c1, Collection<T> c2, Collector<? super T, A, R> collector) { return Stream.concat(c1.stream(), c2.stream()) .collect(collector); } }
Yes
public void conflicting_type_allocation_test() { tester.activate(zoneApp, prepareSystemApplication(zoneApp, NodeType.proxy, "routing")); inactiveExpirer.maintain(); tester.nodeRepository().getNodes(NodeType.host) .forEach(node -> tester.nodeRepository().setReady(node.hostname(), Agent.operator, "Readied by host-admin")); tester.activate(tenantHostApp, prepareSystemApplication(tenantHostApp, NodeType.host, "tenant-host")); Set<Node> allNodes = Set.copyOf(tester.nodeRepository().getNodes()); List<HostSpec> proxyHostSpecs = prepareSystemApplication(zoneApp, NodeType.proxy, "routing"); List<HostSpec> nodeAdminHostSpecs = prepareSystemApplication(zoneApp, NodeType.host, "node-admin"); List<HostSpec> zoneAppHostSpecs = concat(proxyHostSpecs, nodeAdminHostSpecs, Collectors.toList()); tester.activate(zoneApp, zoneAppHostSpecs); assertEquals(0, nodeAdminHostSpecs.size()); assertEquals(allNodes, Set.copyOf(tester.nodeRepository().getNodes())); Node newHost = tester.makeReadyNodes(1, "large", NodeType.host).get(0); proxyHostSpecs = prepareSystemApplication(zoneApp, NodeType.proxy, "routing"); nodeAdminHostSpecs = prepareSystemApplication(zoneApp, NodeType.host, "node-admin"); zoneAppHostSpecs = concat(proxyHostSpecs, nodeAdminHostSpecs, Collectors.toList()); tester.activate(zoneApp, zoneAppHostSpecs); assertEquals(1, nodeAdminHostSpecs.size()); newHost = tester.nodeRepository().getNode(newHost.hostname()).orElseThrow(); Set<Node> allNodesWithNewHost = concat(allNodes, Set.of(newHost), Collectors.toSet()); assertEquals(allNodesWithNewHost, Set.copyOf(tester.nodeRepository().getNodes())); assertEquals(zoneApp, newHost.allocation().get().owner()); }
public void conflicting_type_allocation_test() { tester.activate(zoneApp, prepareSystemApplication(zoneApp, NodeType.proxy, "routing")); inactiveExpirer.maintain(); tester.nodeRepository().getNodes(NodeType.host) .forEach(node -> tester.nodeRepository().setReady(node.hostname(), Agent.operator, "Readied by host-admin")); tester.activate(tenantHostApp, prepareSystemApplication(tenantHostApp, NodeType.host, "tenant-host")); Set<Node> allNodes = Set.copyOf(tester.nodeRepository().getNodes()); List<HostSpec> proxyHostSpecs = prepareSystemApplication(zoneApp, NodeType.proxy, "routing"); List<HostSpec> nodeAdminHostSpecs = prepareSystemApplication(zoneApp, NodeType.host, "node-admin"); List<HostSpec> zoneAppHostSpecs = concat(proxyHostSpecs, nodeAdminHostSpecs, Collectors.toList()); tester.activate(zoneApp, zoneAppHostSpecs); assertEquals(0, nodeAdminHostSpecs.size()); assertEquals(allNodes, Set.copyOf(tester.nodeRepository().getNodes())); Node newHost = tester.makeReadyNodes(1, "large", NodeType.host).get(0); proxyHostSpecs = prepareSystemApplication(zoneApp, NodeType.proxy, "routing"); nodeAdminHostSpecs = prepareSystemApplication(zoneApp, NodeType.host, "node-admin"); zoneAppHostSpecs = concat(proxyHostSpecs, nodeAdminHostSpecs, Collectors.toList()); tester.activate(zoneApp, zoneAppHostSpecs); assertEquals(1, nodeAdminHostSpecs.size()); newHost = tester.nodeRepository().getNode(newHost.hostname()).orElseThrow(); Set<Node> allNodesWithNewHost = concat(allNodes, Set.of(newHost), Collectors.toSet()); assertEquals(allNodesWithNewHost, Set.copyOf(tester.nodeRepository().getNodes())); assertEquals(zoneApp, newHost.allocation().get().owner()); }
class ZoneAppMigrationTest { private final ManualClock clock = new ManualClock(); private final ProvisioningTester tester = new ProvisioningTester.Builder().build(); private final InactiveExpirer inactiveExpirer = new InactiveExpirer(tester.nodeRepository(), clock, Duration.ofDays(99)); private final Version version = Version.fromString("7.42.23"); private final ApplicationId zoneApp = ApplicationId.from("hosted-vespa", "routing", "default"); private final ApplicationId proxyHostApp = ApplicationId.from("hosted-vespa", "proxy-host", "default"); private final ApplicationId tenantHostApp = ApplicationId.from("hosted-vespa", "tenant-host", "default"); private final ApplicationId app1 = tester.makeApplicationId(); private final ApplicationId app2 = tester.makeApplicationId(); @Test public void tenant_host_deallocation_test() { assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.active).size()); assertEquals(15, tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active).size()); Set<Node> tenantNodes = Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant)); tester.activate(zoneApp, prepareSystemApplication(zoneApp, NodeType.proxy, "routing")); assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.inactive).size()); assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant))); inactiveExpirer.maintain(); assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.dirty).size()); assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant))); assertEquals(0, tester.nodeRepository().getNodes(NodeType.host).stream().mapToLong(node -> 
node.status().reboot().wanted()).sum()); tester.nodeRepository().getNodes(NodeType.host) .forEach(node -> tester.nodeRepository().setReady(node.hostname(), Agent.operator, "Readied by host-admin")); assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.ready).size()); assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant))); tester.activate(tenantHostApp, prepareSystemApplication(tenantHostApp, NodeType.host, "tenant-host")); assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.active).size()); assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant))); assertEquals(Set.copyOf(tester.nodeRepository().getNodes(NodeType.host)), Set.copyOf(tester.nodeRepository().getNodes(tenantHostApp))); assertEquals(Set.copyOf(tester.nodeRepository().getNodes(NodeType.proxy)), Set.copyOf(tester.nodeRepository().getNodes(zoneApp))); } @Test @Before public void setup() { tester.makeReadyNodes(5, "large", NodeType.proxyhost); tester.makeReadyNodes(5, "large", NodeType.proxy); tester.makeReadyNodes(20, "large", NodeType.host, 3); tester.activate(proxyHostApp, prepareSystemApplication(proxyHostApp, NodeType.proxyhost, "proxy-host")); List<HostSpec> proxyHostSpecs = prepareSystemApplication(zoneApp, NodeType.proxy, "routing"); List<HostSpec> nodeAdminHostSpecs = prepareSystemApplication(zoneApp, NodeType.host, "node-admin"); List<HostSpec> zoneAppHostSpecs = concat(proxyHostSpecs, nodeAdminHostSpecs, Collectors.toList()); tester.activate(zoneApp, zoneAppHostSpecs); activateTenantApplication(app1, 3, 4); activateTenantApplication(app2, 5, 3); } private List<HostSpec> prepareSystemApplication(ApplicationId applicationId, NodeType nodeType, String clusterId) { return tester.prepare(applicationId, 
ClusterSpec.request(container, ClusterSpec.Id.from(clusterId), version, false, Set.of()), Capacity.fromRequiredNodeType(nodeType), 1); } private void activateTenantApplication(ApplicationId app, int numContainerNodes, int numContentNodes) { List<HostSpec> combinedHostSpecs = new ArrayList<>(numContainerNodes + numContentNodes); combinedHostSpecs.addAll(tester.prepare(app, ClusterSpec.request(container, ClusterSpec.Id.from("web"), version, false, Set.of()), Capacity.fromCount(numContainerNodes, new NodeResources(2, 2, 50)), 1)); combinedHostSpecs.addAll(tester.prepare(app, ClusterSpec.request(content, ClusterSpec.Id.from("store"), version, false, Set.of()), Capacity.fromCount(numContentNodes, new NodeResources(1, 4, 50)), 1)); tester.activate(app, combinedHostSpecs); } private <T, R, A> R concat(Collection<T> c1, Collection<T> c2, Collector<? super T, A, R> collector) { return Stream.concat(c1.stream(), c2.stream()) .collect(collector); } }
class ZoneAppMigrationTest { private final ManualClock clock = new ManualClock(); private final ProvisioningTester tester = new ProvisioningTester.Builder().build(); private final InactiveExpirer inactiveExpirer = new InactiveExpirer(tester.nodeRepository(), clock, Duration.ofDays(99)); private final Version version = Version.fromString("7.42.23"); private final ApplicationId zoneApp = ApplicationId.from("hosted-vespa", "routing", "default"); private final ApplicationId proxyHostApp = ApplicationId.from("hosted-vespa", "proxy-host", "default"); private final ApplicationId tenantHostApp = ApplicationId.from("hosted-vespa", "tenant-host", "default"); private final ApplicationId app1 = tester.makeApplicationId(); private final ApplicationId app2 = tester.makeApplicationId(); @Test public void tenant_host_deallocation_test() { assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.active).size()); assertEquals(15, tester.nodeRepository().getNodes(NodeType.tenant, Node.State.active).size()); Set<Node> tenantNodes = Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant)); tester.activate(zoneApp, prepareSystemApplication(zoneApp, NodeType.proxy, "routing")); assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.inactive).size()); assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant))); inactiveExpirer.maintain(); assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.dirty).size()); assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant))); assertEquals(0, tester.nodeRepository().getNodes(NodeType.host).stream().mapToLong(node -> 
node.status().reboot().wanted()).sum()); tester.nodeRepository().getNodes(NodeType.host) .forEach(node -> tester.nodeRepository().setReady(node.hostname(), Agent.operator, "Readied by host-admin")); assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.ready).size()); assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant))); tester.activate(tenantHostApp, prepareSystemApplication(tenantHostApp, NodeType.host, "tenant-host")); assertEquals(5, tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active).size()); assertEquals(20, tester.nodeRepository().getNodes(NodeType.host, Node.State.active).size()); assertEquals(tenantNodes, Set.copyOf(tester.nodeRepository().getNodes(NodeType.tenant))); assertEquals(Set.copyOf(tester.nodeRepository().getNodes(NodeType.host)), Set.copyOf(tester.nodeRepository().getNodes(tenantHostApp))); assertEquals(Set.copyOf(tester.nodeRepository().getNodes(NodeType.proxy)), Set.copyOf(tester.nodeRepository().getNodes(zoneApp))); } @Test @Before public void setup() { tester.makeReadyNodes(5, "large", NodeType.proxyhost); tester.makeReadyNodes(5, "large", NodeType.proxy); tester.makeReadyNodes(20, "large", NodeType.host, 3); tester.activate(proxyHostApp, prepareSystemApplication(proxyHostApp, NodeType.proxyhost, "proxy-host")); List<HostSpec> proxyHostSpecs = prepareSystemApplication(zoneApp, NodeType.proxy, "routing"); List<HostSpec> nodeAdminHostSpecs = prepareSystemApplication(zoneApp, NodeType.host, "node-admin"); List<HostSpec> zoneAppHostSpecs = concat(proxyHostSpecs, nodeAdminHostSpecs, Collectors.toList()); tester.activate(zoneApp, zoneAppHostSpecs); activateTenantApplication(app1, 3, 4); activateTenantApplication(app2, 5, 3); } private List<HostSpec> prepareSystemApplication(ApplicationId applicationId, NodeType nodeType, String clusterId) { return tester.prepare(applicationId, 
ClusterSpec.request(container, ClusterSpec.Id.from(clusterId), version, false, Set.of()), Capacity.fromRequiredNodeType(nodeType), 1); } private void activateTenantApplication(ApplicationId app, int numContainerNodes, int numContentNodes) { List<HostSpec> combinedHostSpecs = new ArrayList<>(numContainerNodes + numContentNodes); combinedHostSpecs.addAll(tester.prepare(app, ClusterSpec.request(container, ClusterSpec.Id.from("web"), version, false, Set.of()), Capacity.fromCount(numContainerNodes, new NodeResources(2, 2, 50)), 1)); combinedHostSpecs.addAll(tester.prepare(app, ClusterSpec.request(content, ClusterSpec.Id.from("store"), version, false, Set.of()), Capacity.fromCount(numContentNodes, new NodeResources(1, 4, 50)), 1)); tester.activate(app, combinedHostSpecs); } private <T, R, A> R concat(Collection<T> c1, Collection<T> c2, Collector<? super T, A, R> collector) { return Stream.concat(c1.stream(), c2.stream()) .collect(collector); } }
Remove rotationId
private ClusterMembership(String stringValue, Version vespaVersion) { String[] components = stringValue.split("/"); if (components.length < 4) throw new RuntimeException("Could not parse '" + stringValue + "' to a cluster membership. " + "Expected 'clusterType/clusterId/groupId/index[/retired][/exclusive][/rotationId,...]'"); boolean exclusive = false; if (components.length > 4) { for (int i = 4; i < components.length; i++) { String component = components[i]; switch (component) { case "exclusive": exclusive = true; break; case "retired": retired = true; break; } } } this.cluster = ClusterSpec.from(ClusterSpec.Type.valueOf(components[0]), ClusterSpec.Id.from(components[1]), ClusterSpec.Group.from(Integer.valueOf(components[2])), vespaVersion, exclusive); this.index = Integer.parseInt(components[3]); this.stringValue = toStringValue(); }
"Expected 'clusterType/clusterId/groupId/index[/retired][/exclusive][/rotationId,...]'");
private ClusterMembership(String stringValue, Version vespaVersion) { String[] components = stringValue.split("/"); if (components.length < 4) throw new RuntimeException("Could not parse '" + stringValue + "' to a cluster membership. " + "Expected 'clusterType/clusterId/groupId/index[/retired][/exclusive]'"); boolean exclusive = false; if (components.length > 4) { for (int i = 4; i < components.length; i++) { String component = components[i]; switch (component) { case "exclusive": exclusive = true; break; case "retired": retired = true; break; } } } this.cluster = ClusterSpec.from(ClusterSpec.Type.valueOf(components[0]), ClusterSpec.Id.from(components[1]), ClusterSpec.Group.from(Integer.valueOf(components[2])), vespaVersion, exclusive); this.index = Integer.parseInt(components[3]); this.stringValue = toStringValue(); }
class ClusterMembership { private ClusterSpec cluster; private int index; private boolean retired; private String stringValue; protected ClusterMembership() {} private ClusterMembership(ClusterSpec cluster, int index, boolean retired) { this.cluster = cluster; this.index = index; this.retired = retired; this.stringValue = toStringValue(); } protected String toStringValue() { return cluster.type().name() + "/" + cluster.id().value() + (cluster.group().isPresent() ? "/" + cluster.group().get().index() : "") + "/" + index + ( cluster.isExclusive() ? "/exclusive" : "") + ( retired ? "/retired" : ""); } /** Returns the cluster this node is a member of */ public ClusterSpec cluster() { return cluster; } /** Returns the index of this node within the cluster */ public int index() { return index; } /** Returns whether the cluster should prepare for this node to be removed */ public boolean retired() { return retired; } /** Returns a copy of this which is retired */ public ClusterMembership retire() { return new ClusterMembership(cluster, index, true); } /** Returns a copy of this node which is not retired */ public ClusterMembership unretire() { return new ClusterMembership(cluster, index, false); } public ClusterMembership with(ClusterSpec newCluster) { return new ClusterMembership(newCluster, index, retired); } /** * Returns all the information in this as a string which can be used to construct the same ClusterMembership * instance using {@link */ public String stringValue() { return stringValue; } @Override public int hashCode() { return stringValue().hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! 
(other instanceof ClusterMembership)) return false; return ((ClusterMembership)other).stringValue().equals(stringValue()); } @Override public String toString() { return stringValue(); } public static ClusterMembership from(String stringValue, Version vespaVersion) { return new ClusterMembership(stringValue, vespaVersion); } public static ClusterMembership from(ClusterSpec cluster, int index) { return new ClusterMembership(cluster, index, false); } public static ClusterMembership retiredFrom(ClusterSpec cluster, int index) { return new ClusterMembership(cluster, index, true); } }
class ClusterMembership { private ClusterSpec cluster; private int index; private boolean retired; private String stringValue; protected ClusterMembership() {} private ClusterMembership(ClusterSpec cluster, int index, boolean retired) { this.cluster = cluster; this.index = index; this.retired = retired; this.stringValue = toStringValue(); } protected String toStringValue() { return cluster.type().name() + "/" + cluster.id().value() + (cluster.group().isPresent() ? "/" + cluster.group().get().index() : "") + "/" + index + ( cluster.isExclusive() ? "/exclusive" : "") + ( retired ? "/retired" : ""); } /** Returns the cluster this node is a member of */ public ClusterSpec cluster() { return cluster; } /** Returns the index of this node within the cluster */ public int index() { return index; } /** Returns whether the cluster should prepare for this node to be removed */ public boolean retired() { return retired; } /** Returns a copy of this which is retired */ public ClusterMembership retire() { return new ClusterMembership(cluster, index, true); } /** Returns a copy of this node which is not retired */ public ClusterMembership unretire() { return new ClusterMembership(cluster, index, false); } public ClusterMembership with(ClusterSpec newCluster) { return new ClusterMembership(newCluster, index, retired); } /** * Returns all the information in this as a string which can be used to construct the same ClusterMembership * instance using {@link */ public String stringValue() { return stringValue; } @Override public int hashCode() { return stringValue().hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! 
(other instanceof ClusterMembership)) return false; return ((ClusterMembership)other).stringValue().equals(stringValue()); } @Override public String toString() { return stringValue(); } public static ClusterMembership from(String stringValue, Version vespaVersion) { return new ClusterMembership(stringValue, vespaVersion); } public static ClusterMembership from(ClusterSpec cluster, int index) { return new ClusterMembership(cluster, index, false); } public static ClusterMembership retiredFrom(ClusterSpec cluster, int index) { return new ClusterMembership(cluster, index, true); } }
I am not sure I understand the warning, not all applications have a global service id?
void writeContainerEndpointsZK(Optional<String> globalServiceId) { if (!params.containerEndpoints().isEmpty()) { containerEndpoints.write(applicationId, params.containerEndpoints()); } else { if (globalServiceId.isEmpty()) { log.log(LogLevel.WARNING, "Want to write rotations " + rotationsSet + " as container endpoints, but " + applicationId + " has no global-service-id. This should not happen"); return; } containerEndpoints.write(applicationId, toContainerEndpoints(globalServiceId.get(), rotationsSet)); } checkTimeout("write container endpoints to zookeeper"); }
log.log(LogLevel.WARNING, "Want to write rotations " + rotationsSet + " as container endpoints, but " + applicationId + " has no global-service-id. This should not happen");
void writeContainerEndpointsZK(Optional<String> globalServiceId) { if (!params.containerEndpoints().isEmpty()) { containerEndpoints.write(applicationId, params.containerEndpoints()); } else { if (!rotationsSet.isEmpty()) { if (globalServiceId.isEmpty()) { log.log(LogLevel.WARNING, "Want to write rotations " + rotationsSet + " as container endpoints, but " + applicationId + " has no global-service-id. This should not happen"); return; } containerEndpoints.write(applicationId, toContainerEndpoints(globalServiceId.get(), rotationsSet)); } } checkTimeout("write container endpoints to zookeeper"); }
class Preparation { final SessionContext context; final DeployLogger logger; final PrepareParams params; final Optional<ApplicationSet> currentActiveApplicationSet; final Path tenantPath; final ApplicationId applicationId; /** The version of Vespa the application to be prepared specifies for its nodes */ final com.yahoo.component.Version vespaVersion; final Rotations rotations; final ContainerEndpointsCache containerEndpoints; final Set<Rotation> rotationsSet; final ModelContext.Properties properties; private ApplicationPackage applicationPackage; private List<PreparedModelsBuilder.PreparedModelResult> modelResultList; private PrepareResult prepareResult; private final PreparedModelsBuilder preparedModelsBuilder; Preparation(SessionContext context, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath) { this.context = context; this.logger = logger; this.params = params; this.currentActiveApplicationSet = currentActiveApplicationSet; this.tenantPath = tenantPath; this.applicationId = params.getApplicationId(); this.vespaVersion = params.vespaVersion().orElse(Vtag.currentVersion); this.rotations = new Rotations(curator, tenantPath); this.containerEndpoints = new ContainerEndpointsCache(tenantPath, curator); this.rotationsSet = getRotations(params.rotations()); this.properties = new ModelContextImpl.Properties(params.getApplicationId(), configserverConfig.multitenant(), ConfigServerSpec.fromConfig(configserverConfig), HostName.from(configserverConfig.loadBalancerAddress()), configserverConfig.ztsUrl() != null ? URI.create(configserverConfig.ztsUrl()) : null, configserverConfig.athenzDnsSuffix(), configserverConfig.hostedVespa(), zone, rotationsSet, params.isBootstrap(), ! 
currentActiveApplicationSet.isPresent(), context.getFlagSource()); this.preparedModelsBuilder = new PreparedModelsBuilder(modelFactoryRegistry, permanentApplicationPackage, configDefinitionRepo, fileDistributionFactory, hostProvisionerProvider, context, logger, params, currentActiveApplicationSet, properties, configserverConfig); } void checkTimeout(String step) { if (! params.getTimeoutBudget().hasTimeLeft()) { String used = params.getTimeoutBudget().timesUsed(); throw new RuntimeException("prepare timed out "+used+" after "+step+" step: " + applicationId); } } void preprocess() { try { this.applicationPackage = context.getApplicationPackage().preprocess(properties.zone(), logger); } catch (IOException | TransformerException | ParserConfigurationException | SAXException e) { throw new IllegalArgumentException("Error preprocessing application package for " + applicationId, e); } checkTimeout("preprocess"); } AllocatedHosts buildModels(Instant now) { SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(); this.modelResultList = preparedModelsBuilder.buildModels(applicationId, vespaVersion, applicationPackage, allocatedHosts, now); checkTimeout("build models"); return allocatedHosts.get(); } void makeResult(AllocatedHosts allocatedHosts) { this.prepareResult = new PrepareResult(allocatedHosts, modelResultList); checkTimeout("making result from models"); } void writeStateZK() { log.log(LogLevel.DEBUG, "Writing application package state to zookeeper"); writeStateToZooKeeper(context.getSessionZooKeeperClient(), applicationPackage, applicationId, vespaVersion, logger, prepareResult.getFileRegistries(), prepareResult.allocatedHosts()); checkTimeout("write state to zookeeper"); } void writeRotZK() { rotations.writeRotationsToZooKeeper(applicationId, rotationsSet); checkTimeout("write rotations to zookeeper"); } void distribute() { prepareResult.asList().forEach(modelResult -> modelResult.model 
.distributeFiles(modelResult.fileDistributionProvider.getFileDistribution())); checkTimeout("distribute files"); } ConfigChangeActions result() { return prepareResult.getConfigChangeActions(); } private Set<Rotation> getRotations(Set<Rotation> rotations) { if (rotations == null || rotations.isEmpty()) { rotations = this.rotations.readRotationsFromZooKeeper(applicationId); } return rotations; } }
class Preparation { final SessionContext context; final DeployLogger logger; final PrepareParams params; final Optional<ApplicationSet> currentActiveApplicationSet; final Path tenantPath; final ApplicationId applicationId; /** The version of Vespa the application to be prepared specifies for its nodes */ final com.yahoo.component.Version vespaVersion; final Rotations rotations; final ContainerEndpointsCache containerEndpoints; final Set<Rotation> rotationsSet; final ModelContext.Properties properties; private ApplicationPackage applicationPackage; private List<PreparedModelsBuilder.PreparedModelResult> modelResultList; private PrepareResult prepareResult; private final PreparedModelsBuilder preparedModelsBuilder; Preparation(SessionContext context, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath) { this.context = context; this.logger = logger; this.params = params; this.currentActiveApplicationSet = currentActiveApplicationSet; this.tenantPath = tenantPath; this.applicationId = params.getApplicationId(); this.vespaVersion = params.vespaVersion().orElse(Vtag.currentVersion); this.rotations = new Rotations(curator, tenantPath); this.containerEndpoints = new ContainerEndpointsCache(tenantPath, curator); this.rotationsSet = getRotations(params.rotations()); this.properties = new ModelContextImpl.Properties(params.getApplicationId(), configserverConfig.multitenant(), ConfigServerSpec.fromConfig(configserverConfig), HostName.from(configserverConfig.loadBalancerAddress()), configserverConfig.ztsUrl() != null ? URI.create(configserverConfig.ztsUrl()) : null, configserverConfig.athenzDnsSuffix(), configserverConfig.hostedVespa(), zone, rotationsSet, params.isBootstrap(), ! 
currentActiveApplicationSet.isPresent(), context.getFlagSource()); this.preparedModelsBuilder = new PreparedModelsBuilder(modelFactoryRegistry, permanentApplicationPackage, configDefinitionRepo, fileDistributionFactory, hostProvisionerProvider, context, logger, params, currentActiveApplicationSet, properties, configserverConfig); } void checkTimeout(String step) { if (! params.getTimeoutBudget().hasTimeLeft()) { String used = params.getTimeoutBudget().timesUsed(); throw new RuntimeException("prepare timed out "+used+" after "+step+" step: " + applicationId); } } void preprocess() { try { this.applicationPackage = context.getApplicationPackage().preprocess(properties.zone(), logger); } catch (IOException | TransformerException | ParserConfigurationException | SAXException e) { throw new IllegalArgumentException("Error preprocessing application package for " + applicationId, e); } checkTimeout("preprocess"); } AllocatedHosts buildModels(Instant now) { SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(); this.modelResultList = preparedModelsBuilder.buildModels(applicationId, vespaVersion, applicationPackage, allocatedHosts, now); checkTimeout("build models"); return allocatedHosts.get(); } void makeResult(AllocatedHosts allocatedHosts) { this.prepareResult = new PrepareResult(allocatedHosts, modelResultList); checkTimeout("making result from models"); } void writeStateZK() { log.log(LogLevel.DEBUG, "Writing application package state to zookeeper"); writeStateToZooKeeper(context.getSessionZooKeeperClient(), applicationPackage, applicationId, vespaVersion, logger, prepareResult.getFileRegistries(), prepareResult.allocatedHosts()); checkTimeout("write state to zookeeper"); } void writeRotZK() { rotations.writeRotationsToZooKeeper(applicationId, rotationsSet); checkTimeout("write rotations to zookeeper"); } void distribute() { prepareResult.asList().forEach(modelResult -> modelResult.model 
.distributeFiles(modelResult.fileDistributionProvider.getFileDistribution())); checkTimeout("distribute files"); } ConfigChangeActions result() { return prepareResult.getConfigChangeActions(); } private Set<Rotation> getRotations(Set<Rotation> rotations) { if (rotations == null || rotations.isEmpty()) { rotations = this.rotations.readRotationsFromZooKeeper(applicationId); } return rotations; } }
It is an assertion, i.e something declarative. When it fails you'll see java.lang.AssertionError: "Description of the assertion" More importantly, we should optimize for making the test code easy to read, which means describing the assertions rather than writing "error messages which will be output when they fail": `assert...(description of what we are asserting, ... `not `assert...(error message, ...` Anyway, this isn't that important ...
public void testSoftmaxImport() { TestableTensorFlowModel model = new TestableTensorFlowModel("test", "src/test/models/tensorflow/softmax/saved", 1, 5); ImportedModel.Signature signature = model.get().signature("serving_default"); Assert.assertEquals("Has skipped outputs", 0, model.get().signature("serving_default").skippedOutputs().size()); ImportedMlFunction output = signature.outputFunction("y", "y"); assertNotNull(output); model.assertEqualResult("input", "output"); }
Assert.assertEquals("Has skipped outputs",
public void testSoftmaxImport() { TestableTensorFlowModel model = new TestableTensorFlowModel("test", "src/test/models/tensorflow/softmax/saved", 1, 5); ImportedModel.Signature signature = model.get().signature("serving_default"); Assert.assertEquals("Should have no skipped outputs", 0, model.get().signature("serving_default").skippedOutputs().size()); ImportedMlFunction output = signature.outputFunction("y", "y"); assertNotNull(output); model.assertEqualResult("input", "output"); }
class SoftmaxImportTestCase { @Test }
class SoftmaxImportTestCase { @Test }
No, it's an optional attribute in _deployment.xml_. If a tenant sets `global-service-id` and then removes it at a later time, this may happen as we never unassign rotations. The log message is here to verify if such a condition is present in main today.
void writeContainerEndpointsZK(Optional<String> globalServiceId) { if (!params.containerEndpoints().isEmpty()) { containerEndpoints.write(applicationId, params.containerEndpoints()); } else { if (globalServiceId.isEmpty()) { log.log(LogLevel.WARNING, "Want to write rotations " + rotationsSet + " as container endpoints, but " + applicationId + " has no global-service-id. This should not happen"); return; } containerEndpoints.write(applicationId, toContainerEndpoints(globalServiceId.get(), rotationsSet)); } checkTimeout("write container endpoints to zookeeper"); }
log.log(LogLevel.WARNING, "Want to write rotations " + rotationsSet + " as container endpoints, but " + applicationId + " has no global-service-id. This should not happen");
void writeContainerEndpointsZK(Optional<String> globalServiceId) { if (!params.containerEndpoints().isEmpty()) { containerEndpoints.write(applicationId, params.containerEndpoints()); } else { if (!rotationsSet.isEmpty()) { if (globalServiceId.isEmpty()) { log.log(LogLevel.WARNING, "Want to write rotations " + rotationsSet + " as container endpoints, but " + applicationId + " has no global-service-id. This should not happen"); return; } containerEndpoints.write(applicationId, toContainerEndpoints(globalServiceId.get(), rotationsSet)); } } checkTimeout("write container endpoints to zookeeper"); }
class Preparation { final SessionContext context; final DeployLogger logger; final PrepareParams params; final Optional<ApplicationSet> currentActiveApplicationSet; final Path tenantPath; final ApplicationId applicationId; /** The version of Vespa the application to be prepared specifies for its nodes */ final com.yahoo.component.Version vespaVersion; final Rotations rotations; final ContainerEndpointsCache containerEndpoints; final Set<Rotation> rotationsSet; final ModelContext.Properties properties; private ApplicationPackage applicationPackage; private List<PreparedModelsBuilder.PreparedModelResult> modelResultList; private PrepareResult prepareResult; private final PreparedModelsBuilder preparedModelsBuilder; Preparation(SessionContext context, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath) { this.context = context; this.logger = logger; this.params = params; this.currentActiveApplicationSet = currentActiveApplicationSet; this.tenantPath = tenantPath; this.applicationId = params.getApplicationId(); this.vespaVersion = params.vespaVersion().orElse(Vtag.currentVersion); this.rotations = new Rotations(curator, tenantPath); this.containerEndpoints = new ContainerEndpointsCache(tenantPath, curator); this.rotationsSet = getRotations(params.rotations()); this.properties = new ModelContextImpl.Properties(params.getApplicationId(), configserverConfig.multitenant(), ConfigServerSpec.fromConfig(configserverConfig), HostName.from(configserverConfig.loadBalancerAddress()), configserverConfig.ztsUrl() != null ? URI.create(configserverConfig.ztsUrl()) : null, configserverConfig.athenzDnsSuffix(), configserverConfig.hostedVespa(), zone, rotationsSet, params.isBootstrap(), ! 
currentActiveApplicationSet.isPresent(), context.getFlagSource()); this.preparedModelsBuilder = new PreparedModelsBuilder(modelFactoryRegistry, permanentApplicationPackage, configDefinitionRepo, fileDistributionFactory, hostProvisionerProvider, context, logger, params, currentActiveApplicationSet, properties, configserverConfig); } void checkTimeout(String step) { if (! params.getTimeoutBudget().hasTimeLeft()) { String used = params.getTimeoutBudget().timesUsed(); throw new RuntimeException("prepare timed out "+used+" after "+step+" step: " + applicationId); } } void preprocess() { try { this.applicationPackage = context.getApplicationPackage().preprocess(properties.zone(), logger); } catch (IOException | TransformerException | ParserConfigurationException | SAXException e) { throw new IllegalArgumentException("Error preprocessing application package for " + applicationId, e); } checkTimeout("preprocess"); } AllocatedHosts buildModels(Instant now) { SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(); this.modelResultList = preparedModelsBuilder.buildModels(applicationId, vespaVersion, applicationPackage, allocatedHosts, now); checkTimeout("build models"); return allocatedHosts.get(); } void makeResult(AllocatedHosts allocatedHosts) { this.prepareResult = new PrepareResult(allocatedHosts, modelResultList); checkTimeout("making result from models"); } void writeStateZK() { log.log(LogLevel.DEBUG, "Writing application package state to zookeeper"); writeStateToZooKeeper(context.getSessionZooKeeperClient(), applicationPackage, applicationId, vespaVersion, logger, prepareResult.getFileRegistries(), prepareResult.allocatedHosts()); checkTimeout("write state to zookeeper"); } void writeRotZK() { rotations.writeRotationsToZooKeeper(applicationId, rotationsSet); checkTimeout("write rotations to zookeeper"); } void distribute() { prepareResult.asList().forEach(modelResult -> modelResult.model 
.distributeFiles(modelResult.fileDistributionProvider.getFileDistribution())); checkTimeout("distribute files"); } ConfigChangeActions result() { return prepareResult.getConfigChangeActions(); } private Set<Rotation> getRotations(Set<Rotation> rotations) { if (rotations == null || rotations.isEmpty()) { rotations = this.rotations.readRotationsFromZooKeeper(applicationId); } return rotations; } }
class Preparation { final SessionContext context; final DeployLogger logger; final PrepareParams params; final Optional<ApplicationSet> currentActiveApplicationSet; final Path tenantPath; final ApplicationId applicationId; /** The version of Vespa the application to be prepared specifies for its nodes */ final com.yahoo.component.Version vespaVersion; final Rotations rotations; final ContainerEndpointsCache containerEndpoints; final Set<Rotation> rotationsSet; final ModelContext.Properties properties; private ApplicationPackage applicationPackage; private List<PreparedModelsBuilder.PreparedModelResult> modelResultList; private PrepareResult prepareResult; private final PreparedModelsBuilder preparedModelsBuilder; Preparation(SessionContext context, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath) { this.context = context; this.logger = logger; this.params = params; this.currentActiveApplicationSet = currentActiveApplicationSet; this.tenantPath = tenantPath; this.applicationId = params.getApplicationId(); this.vespaVersion = params.vespaVersion().orElse(Vtag.currentVersion); this.rotations = new Rotations(curator, tenantPath); this.containerEndpoints = new ContainerEndpointsCache(tenantPath, curator); this.rotationsSet = getRotations(params.rotations()); this.properties = new ModelContextImpl.Properties(params.getApplicationId(), configserverConfig.multitenant(), ConfigServerSpec.fromConfig(configserverConfig), HostName.from(configserverConfig.loadBalancerAddress()), configserverConfig.ztsUrl() != null ? URI.create(configserverConfig.ztsUrl()) : null, configserverConfig.athenzDnsSuffix(), configserverConfig.hostedVespa(), zone, rotationsSet, params.isBootstrap(), ! 
currentActiveApplicationSet.isPresent(), context.getFlagSource()); this.preparedModelsBuilder = new PreparedModelsBuilder(modelFactoryRegistry, permanentApplicationPackage, configDefinitionRepo, fileDistributionFactory, hostProvisionerProvider, context, logger, params, currentActiveApplicationSet, properties, configserverConfig); } void checkTimeout(String step) { if (! params.getTimeoutBudget().hasTimeLeft()) { String used = params.getTimeoutBudget().timesUsed(); throw new RuntimeException("prepare timed out "+used+" after "+step+" step: " + applicationId); } } void preprocess() { try { this.applicationPackage = context.getApplicationPackage().preprocess(properties.zone(), logger); } catch (IOException | TransformerException | ParserConfigurationException | SAXException e) { throw new IllegalArgumentException("Error preprocessing application package for " + applicationId, e); } checkTimeout("preprocess"); } AllocatedHosts buildModels(Instant now) { SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(); this.modelResultList = preparedModelsBuilder.buildModels(applicationId, vespaVersion, applicationPackage, allocatedHosts, now); checkTimeout("build models"); return allocatedHosts.get(); } void makeResult(AllocatedHosts allocatedHosts) { this.prepareResult = new PrepareResult(allocatedHosts, modelResultList); checkTimeout("making result from models"); } void writeStateZK() { log.log(LogLevel.DEBUG, "Writing application package state to zookeeper"); writeStateToZooKeeper(context.getSessionZooKeeperClient(), applicationPackage, applicationId, vespaVersion, logger, prepareResult.getFileRegistries(), prepareResult.allocatedHosts()); checkTimeout("write state to zookeeper"); } void writeRotZK() { rotations.writeRotationsToZooKeeper(applicationId, rotationsSet); checkTimeout("write rotations to zookeeper"); } void distribute() { prepareResult.asList().forEach(modelResult -> modelResult.model 
.distributeFiles(modelResult.fileDistributionProvider.getFileDistribution())); checkTimeout("distribute files"); } ConfigChangeActions result() { return prepareResult.getConfigChangeActions(); } private Set<Rotation> getRotations(Set<Rotation> rotations) { if (rotations == null || rotations.isEmpty()) { rotations = this.rotations.readRotationsFromZooKeeper(applicationId); } return rotations; } }
Sorry for the bad wording in the question. What I meant to ask was: If an application has no global service id and no rotations, why log here? I.e. shouldn't you only log when rotation set is empty?
void writeContainerEndpointsZK(Optional<String> globalServiceId) { if (!params.containerEndpoints().isEmpty()) { containerEndpoints.write(applicationId, params.containerEndpoints()); } else { if (globalServiceId.isEmpty()) { log.log(LogLevel.WARNING, "Want to write rotations " + rotationsSet + " as container endpoints, but " + applicationId + " has no global-service-id. This should not happen"); return; } containerEndpoints.write(applicationId, toContainerEndpoints(globalServiceId.get(), rotationsSet)); } checkTimeout("write container endpoints to zookeeper"); }
log.log(LogLevel.WARNING, "Want to write rotations " + rotationsSet + " as container endpoints, but " + applicationId + " has no global-service-id. This should not happen");
void writeContainerEndpointsZK(Optional<String> globalServiceId) { if (!params.containerEndpoints().isEmpty()) { containerEndpoints.write(applicationId, params.containerEndpoints()); } else { if (!rotationsSet.isEmpty()) { if (globalServiceId.isEmpty()) { log.log(LogLevel.WARNING, "Want to write rotations " + rotationsSet + " as container endpoints, but " + applicationId + " has no global-service-id. This should not happen"); return; } containerEndpoints.write(applicationId, toContainerEndpoints(globalServiceId.get(), rotationsSet)); } } checkTimeout("write container endpoints to zookeeper"); }
class Preparation { final SessionContext context; final DeployLogger logger; final PrepareParams params; final Optional<ApplicationSet> currentActiveApplicationSet; final Path tenantPath; final ApplicationId applicationId; /** The version of Vespa the application to be prepared specifies for its nodes */ final com.yahoo.component.Version vespaVersion; final Rotations rotations; final ContainerEndpointsCache containerEndpoints; final Set<Rotation> rotationsSet; final ModelContext.Properties properties; private ApplicationPackage applicationPackage; private List<PreparedModelsBuilder.PreparedModelResult> modelResultList; private PrepareResult prepareResult; private final PreparedModelsBuilder preparedModelsBuilder; Preparation(SessionContext context, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath) { this.context = context; this.logger = logger; this.params = params; this.currentActiveApplicationSet = currentActiveApplicationSet; this.tenantPath = tenantPath; this.applicationId = params.getApplicationId(); this.vespaVersion = params.vespaVersion().orElse(Vtag.currentVersion); this.rotations = new Rotations(curator, tenantPath); this.containerEndpoints = new ContainerEndpointsCache(tenantPath, curator); this.rotationsSet = getRotations(params.rotations()); this.properties = new ModelContextImpl.Properties(params.getApplicationId(), configserverConfig.multitenant(), ConfigServerSpec.fromConfig(configserverConfig), HostName.from(configserverConfig.loadBalancerAddress()), configserverConfig.ztsUrl() != null ? URI.create(configserverConfig.ztsUrl()) : null, configserverConfig.athenzDnsSuffix(), configserverConfig.hostedVespa(), zone, rotationsSet, params.isBootstrap(), ! 
currentActiveApplicationSet.isPresent(), context.getFlagSource()); this.preparedModelsBuilder = new PreparedModelsBuilder(modelFactoryRegistry, permanentApplicationPackage, configDefinitionRepo, fileDistributionFactory, hostProvisionerProvider, context, logger, params, currentActiveApplicationSet, properties, configserverConfig); } void checkTimeout(String step) { if (! params.getTimeoutBudget().hasTimeLeft()) { String used = params.getTimeoutBudget().timesUsed(); throw new RuntimeException("prepare timed out "+used+" after "+step+" step: " + applicationId); } } void preprocess() { try { this.applicationPackage = context.getApplicationPackage().preprocess(properties.zone(), logger); } catch (IOException | TransformerException | ParserConfigurationException | SAXException e) { throw new IllegalArgumentException("Error preprocessing application package for " + applicationId, e); } checkTimeout("preprocess"); } AllocatedHosts buildModels(Instant now) { SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(); this.modelResultList = preparedModelsBuilder.buildModels(applicationId, vespaVersion, applicationPackage, allocatedHosts, now); checkTimeout("build models"); return allocatedHosts.get(); } void makeResult(AllocatedHosts allocatedHosts) { this.prepareResult = new PrepareResult(allocatedHosts, modelResultList); checkTimeout("making result from models"); } void writeStateZK() { log.log(LogLevel.DEBUG, "Writing application package state to zookeeper"); writeStateToZooKeeper(context.getSessionZooKeeperClient(), applicationPackage, applicationId, vespaVersion, logger, prepareResult.getFileRegistries(), prepareResult.allocatedHosts()); checkTimeout("write state to zookeeper"); } void writeRotZK() { rotations.writeRotationsToZooKeeper(applicationId, rotationsSet); checkTimeout("write rotations to zookeeper"); } void distribute() { prepareResult.asList().forEach(modelResult -> modelResult.model 
.distributeFiles(modelResult.fileDistributionProvider.getFileDistribution())); checkTimeout("distribute files"); } ConfigChangeActions result() { return prepareResult.getConfigChangeActions(); } private Set<Rotation> getRotations(Set<Rotation> rotations) { if (rotations == null || rotations.isEmpty()) { rotations = this.rotations.readRotationsFromZooKeeper(applicationId); } return rotations; } }
class Preparation { final SessionContext context; final DeployLogger logger; final PrepareParams params; final Optional<ApplicationSet> currentActiveApplicationSet; final Path tenantPath; final ApplicationId applicationId; /** The version of Vespa the application to be prepared specifies for its nodes */ final com.yahoo.component.Version vespaVersion; final Rotations rotations; final ContainerEndpointsCache containerEndpoints; final Set<Rotation> rotationsSet; final ModelContext.Properties properties; private ApplicationPackage applicationPackage; private List<PreparedModelsBuilder.PreparedModelResult> modelResultList; private PrepareResult prepareResult; private final PreparedModelsBuilder preparedModelsBuilder; Preparation(SessionContext context, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath) { this.context = context; this.logger = logger; this.params = params; this.currentActiveApplicationSet = currentActiveApplicationSet; this.tenantPath = tenantPath; this.applicationId = params.getApplicationId(); this.vespaVersion = params.vespaVersion().orElse(Vtag.currentVersion); this.rotations = new Rotations(curator, tenantPath); this.containerEndpoints = new ContainerEndpointsCache(tenantPath, curator); this.rotationsSet = getRotations(params.rotations()); this.properties = new ModelContextImpl.Properties(params.getApplicationId(), configserverConfig.multitenant(), ConfigServerSpec.fromConfig(configserverConfig), HostName.from(configserverConfig.loadBalancerAddress()), configserverConfig.ztsUrl() != null ? URI.create(configserverConfig.ztsUrl()) : null, configserverConfig.athenzDnsSuffix(), configserverConfig.hostedVespa(), zone, rotationsSet, params.isBootstrap(), ! 
currentActiveApplicationSet.isPresent(), context.getFlagSource()); this.preparedModelsBuilder = new PreparedModelsBuilder(modelFactoryRegistry, permanentApplicationPackage, configDefinitionRepo, fileDistributionFactory, hostProvisionerProvider, context, logger, params, currentActiveApplicationSet, properties, configserverConfig); } void checkTimeout(String step) { if (! params.getTimeoutBudget().hasTimeLeft()) { String used = params.getTimeoutBudget().timesUsed(); throw new RuntimeException("prepare timed out "+used+" after "+step+" step: " + applicationId); } } void preprocess() { try { this.applicationPackage = context.getApplicationPackage().preprocess(properties.zone(), logger); } catch (IOException | TransformerException | ParserConfigurationException | SAXException e) { throw new IllegalArgumentException("Error preprocessing application package for " + applicationId, e); } checkTimeout("preprocess"); } AllocatedHosts buildModels(Instant now) { SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(); this.modelResultList = preparedModelsBuilder.buildModels(applicationId, vespaVersion, applicationPackage, allocatedHosts, now); checkTimeout("build models"); return allocatedHosts.get(); } void makeResult(AllocatedHosts allocatedHosts) { this.prepareResult = new PrepareResult(allocatedHosts, modelResultList); checkTimeout("making result from models"); } void writeStateZK() { log.log(LogLevel.DEBUG, "Writing application package state to zookeeper"); writeStateToZooKeeper(context.getSessionZooKeeperClient(), applicationPackage, applicationId, vespaVersion, logger, prepareResult.getFileRegistries(), prepareResult.allocatedHosts()); checkTimeout("write state to zookeeper"); } void writeRotZK() { rotations.writeRotationsToZooKeeper(applicationId, rotationsSet); checkTimeout("write rotations to zookeeper"); } void distribute() { prepareResult.asList().forEach(modelResult -> modelResult.model 
.distributeFiles(modelResult.fileDistributionProvider.getFileDistribution())); checkTimeout("distribute files"); } ConfigChangeActions result() { return prepareResult.getConfigChangeActions(); } private Set<Rotation> getRotations(Set<Rotation> rotations) { if (rotations == null || rotations.isEmpty()) { rotations = this.rotations.readRotationsFromZooKeeper(applicationId); } return rotations; } }
True, thanks!
void writeContainerEndpointsZK(Optional<String> globalServiceId) { if (!params.containerEndpoints().isEmpty()) { containerEndpoints.write(applicationId, params.containerEndpoints()); } else { if (globalServiceId.isEmpty()) { log.log(LogLevel.WARNING, "Want to write rotations " + rotationsSet + " as container endpoints, but " + applicationId + " has no global-service-id. This should not happen"); return; } containerEndpoints.write(applicationId, toContainerEndpoints(globalServiceId.get(), rotationsSet)); } checkTimeout("write container endpoints to zookeeper"); }
log.log(LogLevel.WARNING, "Want to write rotations " + rotationsSet + " as container endpoints, but " + applicationId + " has no global-service-id. This should not happen");
void writeContainerEndpointsZK(Optional<String> globalServiceId) { if (!params.containerEndpoints().isEmpty()) { containerEndpoints.write(applicationId, params.containerEndpoints()); } else { if (!rotationsSet.isEmpty()) { if (globalServiceId.isEmpty()) { log.log(LogLevel.WARNING, "Want to write rotations " + rotationsSet + " as container endpoints, but " + applicationId + " has no global-service-id. This should not happen"); return; } containerEndpoints.write(applicationId, toContainerEndpoints(globalServiceId.get(), rotationsSet)); } } checkTimeout("write container endpoints to zookeeper"); }
class Preparation { final SessionContext context; final DeployLogger logger; final PrepareParams params; final Optional<ApplicationSet> currentActiveApplicationSet; final Path tenantPath; final ApplicationId applicationId; /** The version of Vespa the application to be prepared specifies for its nodes */ final com.yahoo.component.Version vespaVersion; final Rotations rotations; final ContainerEndpointsCache containerEndpoints; final Set<Rotation> rotationsSet; final ModelContext.Properties properties; private ApplicationPackage applicationPackage; private List<PreparedModelsBuilder.PreparedModelResult> modelResultList; private PrepareResult prepareResult; private final PreparedModelsBuilder preparedModelsBuilder; Preparation(SessionContext context, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath) { this.context = context; this.logger = logger; this.params = params; this.currentActiveApplicationSet = currentActiveApplicationSet; this.tenantPath = tenantPath; this.applicationId = params.getApplicationId(); this.vespaVersion = params.vespaVersion().orElse(Vtag.currentVersion); this.rotations = new Rotations(curator, tenantPath); this.containerEndpoints = new ContainerEndpointsCache(tenantPath, curator); this.rotationsSet = getRotations(params.rotations()); this.properties = new ModelContextImpl.Properties(params.getApplicationId(), configserverConfig.multitenant(), ConfigServerSpec.fromConfig(configserverConfig), HostName.from(configserverConfig.loadBalancerAddress()), configserverConfig.ztsUrl() != null ? URI.create(configserverConfig.ztsUrl()) : null, configserverConfig.athenzDnsSuffix(), configserverConfig.hostedVespa(), zone, rotationsSet, params.isBootstrap(), ! 
currentActiveApplicationSet.isPresent(), context.getFlagSource()); this.preparedModelsBuilder = new PreparedModelsBuilder(modelFactoryRegistry, permanentApplicationPackage, configDefinitionRepo, fileDistributionFactory, hostProvisionerProvider, context, logger, params, currentActiveApplicationSet, properties, configserverConfig); } void checkTimeout(String step) { if (! params.getTimeoutBudget().hasTimeLeft()) { String used = params.getTimeoutBudget().timesUsed(); throw new RuntimeException("prepare timed out "+used+" after "+step+" step: " + applicationId); } } void preprocess() { try { this.applicationPackage = context.getApplicationPackage().preprocess(properties.zone(), logger); } catch (IOException | TransformerException | ParserConfigurationException | SAXException e) { throw new IllegalArgumentException("Error preprocessing application package for " + applicationId, e); } checkTimeout("preprocess"); } AllocatedHosts buildModels(Instant now) { SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(); this.modelResultList = preparedModelsBuilder.buildModels(applicationId, vespaVersion, applicationPackage, allocatedHosts, now); checkTimeout("build models"); return allocatedHosts.get(); } void makeResult(AllocatedHosts allocatedHosts) { this.prepareResult = new PrepareResult(allocatedHosts, modelResultList); checkTimeout("making result from models"); } void writeStateZK() { log.log(LogLevel.DEBUG, "Writing application package state to zookeeper"); writeStateToZooKeeper(context.getSessionZooKeeperClient(), applicationPackage, applicationId, vespaVersion, logger, prepareResult.getFileRegistries(), prepareResult.allocatedHosts()); checkTimeout("write state to zookeeper"); } void writeRotZK() { rotations.writeRotationsToZooKeeper(applicationId, rotationsSet); checkTimeout("write rotations to zookeeper"); } void distribute() { prepareResult.asList().forEach(modelResult -> modelResult.model 
.distributeFiles(modelResult.fileDistributionProvider.getFileDistribution())); checkTimeout("distribute files"); } ConfigChangeActions result() { return prepareResult.getConfigChangeActions(); } private Set<Rotation> getRotations(Set<Rotation> rotations) { if (rotations == null || rotations.isEmpty()) { rotations = this.rotations.readRotationsFromZooKeeper(applicationId); } return rotations; } }
class Preparation { final SessionContext context; final DeployLogger logger; final PrepareParams params; final Optional<ApplicationSet> currentActiveApplicationSet; final Path tenantPath; final ApplicationId applicationId; /** The version of Vespa the application to be prepared specifies for its nodes */ final com.yahoo.component.Version vespaVersion; final Rotations rotations; final ContainerEndpointsCache containerEndpoints; final Set<Rotation> rotationsSet; final ModelContext.Properties properties; private ApplicationPackage applicationPackage; private List<PreparedModelsBuilder.PreparedModelResult> modelResultList; private PrepareResult prepareResult; private final PreparedModelsBuilder preparedModelsBuilder; Preparation(SessionContext context, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath) { this.context = context; this.logger = logger; this.params = params; this.currentActiveApplicationSet = currentActiveApplicationSet; this.tenantPath = tenantPath; this.applicationId = params.getApplicationId(); this.vespaVersion = params.vespaVersion().orElse(Vtag.currentVersion); this.rotations = new Rotations(curator, tenantPath); this.containerEndpoints = new ContainerEndpointsCache(tenantPath, curator); this.rotationsSet = getRotations(params.rotations()); this.properties = new ModelContextImpl.Properties(params.getApplicationId(), configserverConfig.multitenant(), ConfigServerSpec.fromConfig(configserverConfig), HostName.from(configserverConfig.loadBalancerAddress()), configserverConfig.ztsUrl() != null ? URI.create(configserverConfig.ztsUrl()) : null, configserverConfig.athenzDnsSuffix(), configserverConfig.hostedVespa(), zone, rotationsSet, params.isBootstrap(), ! 
currentActiveApplicationSet.isPresent(), context.getFlagSource()); this.preparedModelsBuilder = new PreparedModelsBuilder(modelFactoryRegistry, permanentApplicationPackage, configDefinitionRepo, fileDistributionFactory, hostProvisionerProvider, context, logger, params, currentActiveApplicationSet, properties, configserverConfig); } void checkTimeout(String step) { if (! params.getTimeoutBudget().hasTimeLeft()) { String used = params.getTimeoutBudget().timesUsed(); throw new RuntimeException("prepare timed out "+used+" after "+step+" step: " + applicationId); } } void preprocess() { try { this.applicationPackage = context.getApplicationPackage().preprocess(properties.zone(), logger); } catch (IOException | TransformerException | ParserConfigurationException | SAXException e) { throw new IllegalArgumentException("Error preprocessing application package for " + applicationId, e); } checkTimeout("preprocess"); } AllocatedHosts buildModels(Instant now) { SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(); this.modelResultList = preparedModelsBuilder.buildModels(applicationId, vespaVersion, applicationPackage, allocatedHosts, now); checkTimeout("build models"); return allocatedHosts.get(); } void makeResult(AllocatedHosts allocatedHosts) { this.prepareResult = new PrepareResult(allocatedHosts, modelResultList); checkTimeout("making result from models"); } void writeStateZK() { log.log(LogLevel.DEBUG, "Writing application package state to zookeeper"); writeStateToZooKeeper(context.getSessionZooKeeperClient(), applicationPackage, applicationId, vespaVersion, logger, prepareResult.getFileRegistries(), prepareResult.allocatedHosts()); checkTimeout("write state to zookeeper"); } void writeRotZK() { rotations.writeRotationsToZooKeeper(applicationId, rotationsSet); checkTimeout("write rotations to zookeeper"); } void distribute() { prepareResult.asList().forEach(modelResult -> modelResult.model 
.distributeFiles(modelResult.fileDistributionProvider.getFileDistribution())); checkTimeout("distribute files"); } ConfigChangeActions result() { return prepareResult.getConfigChangeActions(); } private Set<Rotation> getRotations(Set<Rotation> rotations) { if (rotations == null || rotations.isEmpty()) { rotations = this.rotations.readRotationsFromZooKeeper(applicationId); } return rotations; } }
Shouldn't this be ``` Defaults.getDefaults().underVespaHome("lib/jars/vespa-testrunner-components-jar-with-dependencies.jar") ``` ? The jar is installed under $VESPA_HOME/lib/jars/, so without the "lib/jars/" prefix the bundle path will not resolve on the container nodes.
/**
 * Adds the test runner components bundle to this cluster when the application
 * instance being deployed is a tester instance.
 *
 * The bundle jar is installed on the container nodes under
 * {@code $VESPA_HOME/lib/jars/}, so the path handed to
 * {@link #addPlatformBundle} must include the {@code lib/jars/} prefix;
 * without it the bundle cannot be located on disk.
 */
private void addTestrunnerComponentsIfTester(DeployState deployState) {
    if (deployState.getProperties().applicationId().instance().isTester())
        addPlatformBundle(Paths.get(Defaults.getDefaults().underVespaHome("lib/jars/vespa-testrunner-components-jar-with-dependencies.jar")));
}
addPlatformBundle(Paths.get(Defaults.getDefaults().underVespaHome("vespa-testrunner-components-jar-with-dependencies.jar")));
/**
 * Adds the test runner components bundle to this cluster when the application
 * instance being deployed is a tester instance.
 *
 * The bundle jar is installed on the container nodes under
 * {@code $VESPA_HOME/lib/jars/}, so the path handed to
 * {@link #addPlatformBundle} must include the {@code lib/jars/} prefix;
 * without it the bundle cannot be located on disk.
 */
private void addTestrunnerComponentsIfTester(DeployState deployState) {
    if (deployState.getProperties().applicationId().instance().isTester())
        addPlatformBundle(Paths.get(Defaults.getDefaults().underVespaHome("lib/jars/vespa-testrunner-components-jar-with-dependencies.jar")));
}
class ContainerCluster<CONTAINER extends Container> extends AbstractConfigProducer<AbstractConfigProducer<?>> implements ComponentsConfig.Producer, JdiscBindingsConfig.Producer, DocumentmanagerConfig.Producer, ContainerMbusConfig.Producer, ContainerDocumentConfig.Producer, HealthMonitorConfig.Producer, ApplicationMetadataConfig.Producer, BundlesConfig.Producer, IndexInfoConfig.Producer, IlscriptsConfig.Producer, SchemamappingConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, PageTemplatesConfig.Producer, SemanticRulesConfig.Producer, DocprocConfig.Producer, ClusterInfoConfig.Producer, RoutingProviderConfig.Producer, ConfigserverConfig.Producer { /** * URI prefix used for internal, usually programmatic, APIs. URIs using this * prefix should never considered available for direct use by customers, and * normal compatibility concerns only applies to libraries using the URIs in * question, not contents served from the URIs themselves. */ public static final String RESERVED_URI_PREFIX = "reserved-for-internal-use"; public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final String STATISTICS_HANDLER_CLASS = "com.yahoo.container.config.StatisticsRequestHandler"; public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider"; public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; public static final String ROOT_HANDLER_BINDING = "*: private final String name; 
protected List<CONTAINER> containers = new ArrayList<>(); private Http http; private ProcessingChains processingChains; private ContainerSearch containerSearch; private ContainerDocproc containerDocproc; private ContainerDocumentApi containerDocumentApi; private SecretStore secretStore; private MbusParams mbusParams; private boolean messageBusEnabled = true; private boolean rpcServerEnabled = true; private boolean httpServerEnabled = true; private final Set<Path> platformBundles = new LinkedHashSet<>(); private final List<String> serviceAliases = new ArrayList<>(); private final List<String> endpointAliases = new ArrayList<>(); private final ComponentGroup<Component<?, ?>> componentGroup; private final boolean isHostedVespa; private Map<String, String> concreteDocumentTypes = new LinkedHashMap<>(); private ApplicationMetaData applicationMetaData = null; /** The zone this is deployed in, or the default zone if not on hosted Vespa */ private Zone zone; private String hostClusterId = null; private String jvmGCOptions = null; private String environmentVars = null; private Integer memoryPercentage = null; public ContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) { super(parent, subId); this.name = name; this.isHostedVespa = stateIsHosted(deployState); this.zone = (deployState != null) ? 
deployState.zone() : Zone.defaultZone(); componentGroup = new ComponentGroup<>(this, "component"); addComponent(new StatisticsComponent()); addSimpleComponent(AccessLog.class); addSimpleComponent(ThreadPoolProvider.class); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class); addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor"); addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory"); addSimpleComponent("com.yahoo.container.handler.VipStatus"); addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName()); addJaxProviders(); addTestrunnerComponentsIfTester(deployState); } public void setZone(Zone zone) { this.zone = zone; } public Zone getZone() { return zone; } public void addDefaultHandlersWithVip() { addDefaultHandlersExceptStatus(); addVipHandler(); } public final void addDefaultHandlersExceptStatus() { addDefaultRootHandler(); addMetricStateHandler(); addApplicationStatusHandler(); } public void addMetricStateHandler() { Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>( new ComponentModel(STATE_HANDLER_CLASS, null, null, null)); stateHandler.addServerBindings("http: "http: addComponent(stateHandler); } public void addDefaultRootHandler() { if (hasHandlerWithBinding(ROOT_HANDLER_BINDING)) return; Handler<AbstractConfigProducer<?>> handler = new Handler<>( new 
ComponentModel(BundleInstantiationSpecification.getFromStrings( BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null)); handler.addServerBindings(ROOT_HANDLER_BINDING); addComponent(handler); } private boolean hasHandlerWithBinding(String binding) { Collection<Handler<?>> handlers = getHandlers(); for (Handler handler : handlers) { if (handler.getServerBindings().contains(binding)) return true; } return false; } public void addApplicationStatusHandler() { Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( APPLICATION_STATUS_HANDLER_CLASS, null), null)); statusHandler.addServerBindings("http: addComponent(statusHandler); } public void addVipHandler() { Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS); vipHandler.addServerBindings("http: addComponent(vipHandler); } @SuppressWarnings("deprecation") private void addJaxProviders() { addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class); } public final void addComponent(Component<?, ?> component) { componentGroup.addComponent(component); } public final void addSimpleComponent(String idSpec, String classSpec, String bundleSpec) { addComponent(new SimpleComponent(new 
ComponentModel(idSpec, classSpec, bundleSpec))); } /** * Removes a component by id * * @return the removed component, or null if it was not present */ public Component removeComponent(ComponentId componentId) { return componentGroup.removeComponent(componentId); } private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } protected void addSimpleComponent(String className) { addComponent(new SimpleComponent(className)); } public void prepare(DeployState deployState) { applicationMetaData = deployState.getApplicationPackage().getMetaData(); doPrepare(deployState); } protected abstract void doPrepare(DeployState deployState); public void addMbusServer(ComponentId chainId) { ComponentId serviceId = chainId.nestInNamespace(ComponentId.fromString("MbusServer")); addComponent( new Component<>(new ComponentModel(new BundleInstantiationSpecification( serviceId, ComponentSpecification.fromString(MbusServerProvider.class.getName()), null)))); } public String getName() { return name; } public List<CONTAINER> getContainers() { return Collections.unmodifiableList(containers); } public void addContainer(CONTAINER container) { container.setClusterName(name); container.setProp("clustername", name) .setProp("index", this.containers.size()); containers.add(container); } public void addContainers(Collection<CONTAINER> containers) { containers.forEach(this::addContainer); } public void setProcessingChains(ProcessingChains processingChains, String... 
serverBindings) { if (this.processingChains != null) throw new IllegalStateException("ProcessingChains should only be set once."); this.processingChains = processingChains; ProcessingHandler<?> processingHandler = new ProcessingHandler<>( processingChains, "com.yahoo.processing.handler.ProcessingHandler"); for (String binding: serverBindings) processingHandler.addServerBindings(binding); addComponent(processingHandler); } ProcessingChains getProcessingChains() { return processingChains; } @NonNull public SearchChains getSearchChains() { if (containerSearch == null) throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + "': Add <search/> to the cluster in services.xml"); return containerSearch.getChains(); } @Nullable public ContainerSearch getSearch() { return containerSearch; } public void setSearch(ContainerSearch containerSearch) { this.containerSearch = containerSearch; } public void setHttp(Http http) { this.http = http; addChild(http); } @Nullable public Http getHttp() { return http; } @Nullable public ContainerDocproc getDocproc() { return containerDocproc; } public void setDocproc(ContainerDocproc containerDocproc) { this.containerDocproc = containerDocproc; } @Nullable public ContainerDocumentApi getDocumentApi() { return containerDocumentApi; } public void setDocumentApi(ContainerDocumentApi containerDocumentApi) { this.containerDocumentApi = containerDocumentApi; } @NonNull public DocprocChains getDocprocChains() { if (containerDocproc == null) throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() + "': Add <document-processing/> to the cluster in services.xml"); return containerDocproc.getChains(); } @SuppressWarnings("unchecked") public Collection<Handler<?>> getHandlers() { return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class); } public void setSecretStore(SecretStore secretStore) { this.secretStore = secretStore; } 
public Optional<SecretStore> getSecretStore() { return Optional.ofNullable(secretStore); } public Map<ComponentId, Component<?, ?>> getComponentsMap() { return componentGroup.getComponentMap(); } /** Returns all components in this cluster (generic, handlers, chained) */ public Collection<Component<?, ?>> getAllComponents() { List<Component<?, ?>> allComponents = new ArrayList<>(); recursivelyFindAllComponents(allComponents, this); Collections.sort(allComponents); return Collections.unmodifiableCollection(allComponents); } private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) { for (AbstractConfigProducer<?> child: current.getChildren().values()) { if (child instanceof Component) allComponents.add((Component<?, ?>) child); if (!(child instanceof Container)) recursivelyFindAllComponents(allComponents, child); } } @Override public void getConfig(ComponentsConfig.Builder builder) { builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents())); builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack")); } @Override public void getConfig(JdiscBindingsConfig.Builder builder) { builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers())); } @Override public void getConfig(DocumentmanagerConfig.Builder builder) { if (containerDocproc != null && containerDocproc.isCompressDocuments()) builder.enablecompression(true); } @Override public void getConfig(ContainerDocumentConfig.Builder builder) { for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) { ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder(); dtb.type(e.getKey()); dtb.factorycomponent(e.getValue()); builder.doctype(dtb); } } @Override public void getConfig(HealthMonitorConfig.Builder builder) { Monitoring monitoring = getMonitoringService(); if (monitoring != null) { 
builder.snapshot_interval(monitoring.getIntervalSeconds()); } } @Override public void getConfig(ApplicationMetadataConfig.Builder builder) { if (applicationMetaData != null) { builder.name(applicationMetaData.getApplicationName()). user(applicationMetaData.getDeployedByUser()). path(applicationMetaData.getDeployPath()). timestamp(applicationMetaData.getDeployTimestamp()). checksum(applicationMetaData.getCheckSum()). generation(applicationMetaData.getGeneration()); } } /** * Adds a bundle present at a known location at the target container nodes. * * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar' */ public final void addPlatformBundle(Path bundlePath) { platformBundles.add(bundlePath); } @Override public void getConfig(BundlesConfig.Builder builder) { platformBundles.stream() .map(ContainerCluster::toFileReferenceString) .forEach(builder::bundle); } private static String toFileReferenceString(Path path) { return DISK_BUNDLE_PREFIX + path.toString(); } @Override public void getConfig(QrSearchersConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder builder) { QrStartConfig.Jvm.Builder jvmBuilder = builder.jvm; if (getMemoryPercentage().isPresent()) { jvmBuilder.heapSizeAsPercentageOfPhysicalMemory(getMemoryPercentage().get()); } else if (isHostedVespa()) { jvmBuilder.heapSizeAsPercentageOfPhysicalMemory(getHostClusterId().isPresent() ? 
17 : 60); } jvmBuilder.gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC)); if (environmentVars != null) { builder.qrs.env(environmentVars); } } @Override public void getConfig(DocprocConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SchemamappingConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(ContainerMbusConfig.Builder builder) { if (mbusParams != null) { if (mbusParams.maxConcurrentFactor != null) builder.maxConcurrentFactor(mbusParams.maxConcurrentFactor); if (mbusParams.documentExpansionFactor != null) builder.documentExpansionFactor(mbusParams.documentExpansionFactor); if (mbusParams.containerCoreMemory != null) builder.containerCoreMemory(mbusParams.containerCoreMemory); } if (containerDocproc != null) containerDocproc.getConfig(builder); } public void setMbusParams(MbusParams mbusParams) { this.mbusParams = mbusParams; } public void initialize(Map<String, AbstractSearchCluster> clusterMap) { if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap); } public void addDefaultSearchAccessLog() { addComponent(new AccessLogComponent(AccessLogComponent.AccessLogType.jsonAccessLog, getName(), isHostedVespa)); } @Override public void getConfig(IlscriptsConfig.Builder builder) { List<AbstractSearchCluster> searchClusters = new ArrayList<>(); 
searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo())); for (AbstractSearchCluster searchCluster : searchClusters) { searchCluster.getConfig(builder); } } @Override public void getConfig(ClusterInfoConfig.Builder builder) { builder.clusterId(name); builder.nodeCount(containers.size()); for (Service service : getDescendantServices()) { builder.services.add(new ClusterInfoConfig.Services.Builder() .index(Integer.parseInt(service.getServicePropertyString("index", "99999"))) .hostname(service.getHostName()) .ports(getPorts(service))); } } /** * Returns a config server config containing the right zone settings (and defaults for the rest). * This is useful to allow applications to find out in which zone they are runnung by having the Zone * object (which is constructed from this config) injected. */ @Override public void getConfig(ConfigserverConfig.Builder builder) { builder.system(zone.system().value()); builder.environment(zone.environment().value()); builder.region(zone.region().value()); } private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) { List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>(); PortsMeta portsMeta = service.getPortsMeta(); for (int i = 0; i < portsMeta.getNumPorts(); i++) { builders.add(new ClusterInfoConfig.Services.Ports.Builder() .number(service.getRelativePort(i)) .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i)) ); } return builders; } public boolean isHostedVespa() { return isHostedVespa; } @Override public void getConfig(RoutingProviderConfig.Builder builder) { builder.enabled(isHostedVespa); } public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; } /** The configured service aliases for the service in this cluster */ public List<String> serviceAliases() { return serviceAliases; } /** The configured endpoint aliases (fqdn) for the service in this cluster */ public List<String> endpointAliases() { return endpointAliases; } 
public void setHostClusterId(String clusterId) { hostClusterId = clusterId; } /** * Returns the id of the content cluster which hosts this container cluster, if any. * This is only set with hosted clusters where this container cluster is set up to run on the nodes * of a content cluster. */ public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); } public void setMemoryPercentage(Integer memoryPercentage) { this.memoryPercentage = memoryPercentage; } public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; } public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; } public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); } /** * Returns the percentage of host physical memory this application has specified for nodes in this cluster, * or empty if this is not specified by the application. */ public Optional<Integer> getMemoryPercentage() { return Optional.ofNullable(memoryPercentage); } public final void setMessageBusEnabled(boolean messageBusEnabled) { this.messageBusEnabled = messageBusEnabled; } boolean messageBusEnabled() { return messageBusEnabled; } public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; } boolean rpcServerEnabled() { return rpcServerEnabled; } boolean httpServerEnabled() { return httpServerEnabled; } public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; } @Override public String toString() { return "container cluster '" + getName() + "'"; } public static class MbusParams { final Double maxConcurrentFactor; final Double documentExpansionFactor; final Integer containerCoreMemory; public MbusParams(Double maxConcurrentFactor, Double documentExpansionFactor, Integer containerCoreMemory) { this.maxConcurrentFactor = maxConcurrentFactor; this.documentExpansionFactor = documentExpansionFactor; this.containerCoreMemory = 
containerCoreMemory; } } }
class ContainerCluster<CONTAINER extends Container> extends AbstractConfigProducer<AbstractConfigProducer<?>> implements ComponentsConfig.Producer, JdiscBindingsConfig.Producer, DocumentmanagerConfig.Producer, ContainerMbusConfig.Producer, ContainerDocumentConfig.Producer, HealthMonitorConfig.Producer, ApplicationMetadataConfig.Producer, BundlesConfig.Producer, IndexInfoConfig.Producer, IlscriptsConfig.Producer, SchemamappingConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, PageTemplatesConfig.Producer, SemanticRulesConfig.Producer, DocprocConfig.Producer, ClusterInfoConfig.Producer, RoutingProviderConfig.Producer, ConfigserverConfig.Producer { /** * URI prefix used for internal, usually programmatic, APIs. URIs using this * prefix should never considered available for direct use by customers, and * normal compatibility concerns only applies to libraries using the URIs in * question, not contents served from the URIs themselves. */ public static final String RESERVED_URI_PREFIX = "reserved-for-internal-use"; public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final String STATISTICS_HANDLER_CLASS = "com.yahoo.container.config.StatisticsRequestHandler"; public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider"; public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; public static final String ROOT_HANDLER_BINDING = "*: private final String name; 
protected List<CONTAINER> containers = new ArrayList<>(); private Http http; private ProcessingChains processingChains; private ContainerSearch containerSearch; private ContainerDocproc containerDocproc; private ContainerDocumentApi containerDocumentApi; private SecretStore secretStore; private MbusParams mbusParams; private boolean messageBusEnabled = true; private boolean rpcServerEnabled = true; private boolean httpServerEnabled = true; private final Set<Path> platformBundles = new LinkedHashSet<>(); private final List<String> serviceAliases = new ArrayList<>(); private final List<String> endpointAliases = new ArrayList<>(); private final ComponentGroup<Component<?, ?>> componentGroup; private final boolean isHostedVespa; private Map<String, String> concreteDocumentTypes = new LinkedHashMap<>(); private ApplicationMetaData applicationMetaData = null; /** The zone this is deployed in, or the default zone if not on hosted Vespa */ private Zone zone; private String hostClusterId = null; private String jvmGCOptions = null; private String environmentVars = null; private Integer memoryPercentage = null; public ContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) { super(parent, subId); this.name = name; this.isHostedVespa = stateIsHosted(deployState); this.zone = (deployState != null) ? 
deployState.zone() : Zone.defaultZone(); componentGroup = new ComponentGroup<>(this, "component"); addComponent(new StatisticsComponent()); addSimpleComponent(AccessLog.class); addSimpleComponent(ThreadPoolProvider.class); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class); addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor"); addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory"); addSimpleComponent("com.yahoo.container.handler.VipStatus"); addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName()); addJaxProviders(); addTestrunnerComponentsIfTester(deployState); } public void setZone(Zone zone) { this.zone = zone; } public Zone getZone() { return zone; } public void addDefaultHandlersWithVip() { addDefaultHandlersExceptStatus(); addVipHandler(); } public final void addDefaultHandlersExceptStatus() { addDefaultRootHandler(); addMetricStateHandler(); addApplicationStatusHandler(); } public void addMetricStateHandler() { Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>( new ComponentModel(STATE_HANDLER_CLASS, null, null, null)); stateHandler.addServerBindings("http: "http: addComponent(stateHandler); } public void addDefaultRootHandler() { if (hasHandlerWithBinding(ROOT_HANDLER_BINDING)) return; Handler<AbstractConfigProducer<?>> handler = new Handler<>( new 
ComponentModel(BundleInstantiationSpecification.getFromStrings( BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null)); handler.addServerBindings(ROOT_HANDLER_BINDING); addComponent(handler); } private boolean hasHandlerWithBinding(String binding) { Collection<Handler<?>> handlers = getHandlers(); for (Handler handler : handlers) { if (handler.getServerBindings().contains(binding)) return true; } return false; } public void addApplicationStatusHandler() { Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( APPLICATION_STATUS_HANDLER_CLASS, null), null)); statusHandler.addServerBindings("http: addComponent(statusHandler); } public void addVipHandler() { Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS); vipHandler.addServerBindings("http: addComponent(vipHandler); } @SuppressWarnings("deprecation") private void addJaxProviders() { addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class); } public final void addComponent(Component<?, ?> component) { componentGroup.addComponent(component); } public final void addSimpleComponent(String idSpec, String classSpec, String bundleSpec) { addComponent(new SimpleComponent(new 
ComponentModel(idSpec, classSpec, bundleSpec))); } /** * Removes a component by id * * @return the removed component, or null if it was not present */ public Component removeComponent(ComponentId componentId) { return componentGroup.removeComponent(componentId); } private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } protected void addSimpleComponent(String className) { addComponent(new SimpleComponent(className)); } public void prepare(DeployState deployState) { applicationMetaData = deployState.getApplicationPackage().getMetaData(); doPrepare(deployState); } protected abstract void doPrepare(DeployState deployState); public void addMbusServer(ComponentId chainId) { ComponentId serviceId = chainId.nestInNamespace(ComponentId.fromString("MbusServer")); addComponent( new Component<>(new ComponentModel(new BundleInstantiationSpecification( serviceId, ComponentSpecification.fromString(MbusServerProvider.class.getName()), null)))); } public String getName() { return name; } public List<CONTAINER> getContainers() { return Collections.unmodifiableList(containers); } public void addContainer(CONTAINER container) { container.setClusterName(name); container.setProp("clustername", name) .setProp("index", this.containers.size()); containers.add(container); } public void addContainers(Collection<CONTAINER> containers) { containers.forEach(this::addContainer); } public void setProcessingChains(ProcessingChains processingChains, String... 
serverBindings) { if (this.processingChains != null) throw new IllegalStateException("ProcessingChains should only be set once."); this.processingChains = processingChains; ProcessingHandler<?> processingHandler = new ProcessingHandler<>( processingChains, "com.yahoo.processing.handler.ProcessingHandler"); for (String binding: serverBindings) processingHandler.addServerBindings(binding); addComponent(processingHandler); } ProcessingChains getProcessingChains() { return processingChains; } @NonNull public SearchChains getSearchChains() { if (containerSearch == null) throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + "': Add <search/> to the cluster in services.xml"); return containerSearch.getChains(); } @Nullable public ContainerSearch getSearch() { return containerSearch; } public void setSearch(ContainerSearch containerSearch) { this.containerSearch = containerSearch; } public void setHttp(Http http) { this.http = http; addChild(http); } @Nullable public Http getHttp() { return http; } @Nullable public ContainerDocproc getDocproc() { return containerDocproc; } public void setDocproc(ContainerDocproc containerDocproc) { this.containerDocproc = containerDocproc; } @Nullable public ContainerDocumentApi getDocumentApi() { return containerDocumentApi; } public void setDocumentApi(ContainerDocumentApi containerDocumentApi) { this.containerDocumentApi = containerDocumentApi; } @NonNull public DocprocChains getDocprocChains() { if (containerDocproc == null) throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() + "': Add <document-processing/> to the cluster in services.xml"); return containerDocproc.getChains(); } @SuppressWarnings("unchecked") public Collection<Handler<?>> getHandlers() { return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class); } public void setSecretStore(SecretStore secretStore) { this.secretStore = secretStore; } 
/** Returns the secret store configured for this cluster, if any. */
public Optional<SecretStore> getSecretStore() { return Optional.ofNullable(secretStore); }

/** Returns the components registered directly on this cluster's component group, keyed by id. */
public Map<ComponentId, Component<?, ?>> getComponentsMap() { return componentGroup.getComponentMap(); }

/** Returns all components in this cluster (generic, handlers, chained) */
public Collection<Component<?, ?>> getAllComponents() {
    List<Component<?, ?>> allComponents = new ArrayList<>();
    recursivelyFindAllComponents(allComponents, this);
    Collections.sort(allComponents);
    // Unmodifiable view: callers must not mutate the cluster's component set through this.
    return Collections.unmodifiableCollection(allComponents);
}

// Walks the config-producer tree below 'current', collecting every Component into
// 'allComponents'. Deliberately does not descend into Container children — components
// owned by an individual container node are not cluster-level components.
private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) {
    for (AbstractConfigProducer<?> child: current.getChildren().values()) {
        if (child instanceof Component)
            allComponents.add((Component<?, ?>) child);
        if (!(child instanceof Container))
            recursivelyFindAllComponents(allComponents, child);
    }
}

// Emits all cluster components, plus the RegistriesHack marker component required by
// the container's DI bootstrapping (see the id string below).
@Override
public void getConfig(ComponentsConfig.Builder builder) {
    builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents()));
    builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack"));
}

// Emits the server bindings of all handlers in this cluster.
@Override
public void getConfig(JdiscBindingsConfig.Builder builder) {
    builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers()));
}

// Document compression is only enabled when docproc is present and asks for it.
@Override
public void getConfig(DocumentmanagerConfig.Builder builder) {
    if (containerDocproc != null && containerDocproc.isCompressDocuments())
        builder.enablecompression(true);
}

// One doctype entry per concrete document type: type name -> factory component id.
@Override
public void getConfig(ContainerDocumentConfig.Builder builder) {
    for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) {
        ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder();
        dtb.type(e.getKey());
        dtb.factorycomponent(e.getValue());
        builder.doctype(dtb);
    }
}

// Snapshot interval comes from the monitoring service when one is configured;
// otherwise the config default applies.
@Override
public void getConfig(HealthMonitorConfig.Builder builder) {
    Monitoring monitoring = getMonitoringService();
    if (monitoring != null) {
        builder.snapshot_interval(monitoring.getIntervalSeconds());
    }
}

// Application metadata is only available after prepare() has run; before that,
// applicationMetaData is null and the builder's defaults are left in place.
@Override
public void getConfig(ApplicationMetadataConfig.Builder builder) {
    if (applicationMetaData != null) {
        builder.name(applicationMetaData.getApplicationName()).
                user(applicationMetaData.getDeployedByUser()).
                path(applicationMetaData.getDeployPath()).
                timestamp(applicationMetaData.getDeployTimestamp()).
                checksum(applicationMetaData.getCheckSum()).
                generation(applicationMetaData.getGeneration());
    }
}

/**
 * Adds a bundle present at a known location at the target container nodes.
 *
 * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar'
 */
public final void addPlatformBundle(Path bundlePath) {
    platformBundles.add(bundlePath);
}

// Publishes each platform bundle as a disk-prefixed file reference.
@Override
public void getConfig(BundlesConfig.Builder builder) {
    platformBundles.stream()
            .map(ContainerCluster::toFileReferenceString)
            .forEach(builder::bundle);
}

private static String toFileReferenceString(Path path) {
    return DISK_BUNDLE_PREFIX + path.toString();
}

@Override
public void getConfig(QrSearchersConfig.Builder builder) {
    if (containerSearch != null) containerSearch.getConfig(builder);
}

// JVM settings: an application-specified heap percentage wins; otherwise hosted
// clusters get 17% when co-located on a content cluster's nodes, 60% when standalone.
// GC options fall back to G1GC when not set by the application.
@Override
public void getConfig(QrStartConfig.Builder builder) {
    QrStartConfig.Jvm.Builder jvmBuilder = builder.jvm;
    if (getMemoryPercentage().isPresent()) {
        jvmBuilder.heapSizeAsPercentageOfPhysicalMemory(getMemoryPercentage().get());
    } else if (isHostedVespa()) {
        jvmBuilder.heapSizeAsPercentageOfPhysicalMemory(getHostClusterId().isPresent() ? 17 : 60);
    }
    jvmBuilder.gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC));
    if (environmentVars != null) {
        builder.qrs.env(environmentVars);
    }
}

// The following producers simply delegate to the docproc/search sub-model when present.
@Override
public void getConfig(DocprocConfig.Builder builder) {
    if (containerDocproc != null) containerDocproc.getConfig(builder);
}

@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
    if (containerSearch != null) containerSearch.getConfig(builder);
}

@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
    if (containerSearch != null) containerSearch.getConfig(builder);
}

@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
    if (containerSearch != null) containerSearch.getConfig(builder);
}

@Override
public void getConfig(SchemamappingConfig.Builder builder) {
    if (containerDocproc != null) containerDocproc.getConfig(builder);
}

@Override
public void getConfig(IndexInfoConfig.Builder builder) {
    if (containerSearch != null) containerSearch.getConfig(builder);
}

// Message bus tuning: each MbusParams field overrides the config default only when
// non-null; docproc may contribute additional settings on top.
@Override
public void getConfig(ContainerMbusConfig.Builder builder) {
    if (mbusParams != null) {
        if (mbusParams.maxConcurrentFactor != null)
            builder.maxConcurrentFactor(mbusParams.maxConcurrentFactor);
        if (mbusParams.documentExpansionFactor != null)
            builder.documentExpansionFactor(mbusParams.documentExpansionFactor);
        if (mbusParams.containerCoreMemory != null)
            builder.containerCoreMemory(mbusParams.containerCoreMemory);
    }
    if (containerDocproc != null) containerDocproc.getConfig(builder);
}

public void setMbusParams(MbusParams mbusParams) { this.mbusParams = mbusParams; }

/** Connects this cluster's search component (if any) to the given search clusters. */
public void initialize(Map<String, AbstractSearchCluster> clusterMap) {
    if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap);
}

/** Adds the default JSON access log component for this cluster. */
public void addDefaultSearchAccessLog() {
    addComponent(new AccessLogComponent(AccessLogComponent.AccessLogType.jsonAccessLog, getName(), isHostedVespa));
}

// Aggregates indexing-language scripts from every search cluster in the model,
// not just this container cluster's own search component.
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
    List<AbstractSearchCluster> searchClusters = new ArrayList<>();
    searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo()));
    for (AbstractSearchCluster searchCluster : searchClusters) {
        searchCluster.getConfig(builder);
    }
}

// Describes this cluster: one entry per descendant service, with its index
// (default "99999" when unset), hostname and ports.
@Override
public void getConfig(ClusterInfoConfig.Builder builder) {
    builder.clusterId(name);
    builder.nodeCount(containers.size());
    for (Service service : getDescendantServices()) {
        builder.services.add(new ClusterInfoConfig.Services.Builder()
                .index(Integer.parseInt(service.getServicePropertyString("index", "99999")))
                .hostname(service.getHostName())
                .ports(getPorts(service)));
    }
}

/**
 * Returns a config server config containing the right zone settings (and defaults for the rest).
 * This is useful to allow applications to find out in which zone they are running by having the Zone
 * object (which is constructed from this config) injected.
 */
@Override
public void getConfig(ConfigserverConfig.Builder builder) {
    builder.system(zone.system().value());
    builder.environment(zone.environment().value());
    builder.region(zone.region().value());
}

// Builds one Ports entry per port of the service, tagged via the service's port metadata.
private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) {
    List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>();
    PortsMeta portsMeta = service.getPortsMeta();
    for (int i = 0; i < portsMeta.getNumPorts(); i++) {
        builders.add(new ClusterInfoConfig.Services.Ports.Builder()
                .number(service.getRelativePort(i))
                .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i))
        );
    }
    return builders;
}

public boolean isHostedVespa() { return isHostedVespa; }

@Override
public void getConfig(RoutingProviderConfig.Builder builder) { builder.enabled(isHostedVespa); }

// NOTE(review): exposes the internal mutable map directly — callers can mutate
// cluster state through it; confirm whether this is relied upon before wrapping.
public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; }

/** The configured service aliases for the service in this cluster */
public List<String> serviceAliases() { return serviceAliases; }

/** The configured endpoint aliases (fqdn) for the service in this cluster */
public List<String> endpointAliases() { return endpointAliases; }
/** Sets the id of the content cluster which hosts this container cluster (hosted Vespa only). */
public void setHostClusterId(String clusterId) { hostClusterId = clusterId; }

/**
 * Returns the id of the content cluster which hosts this container cluster, if any.
 * This is only set with hosted clusters where this container cluster is set up to run on the nodes
 * of a content cluster.
 */
public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); }

/** Sets the application-specified heap percentage; null clears it (defaults apply). */
public void setMemoryPercentage(Integer memoryPercentage) { this.memoryPercentage = memoryPercentage; }

/** Sets the JVM GC options string; null means the default (G1GC) is used. */
public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; }

/** Sets the environment variable string passed to the qrs start config; null means none. */
public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; }

public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); }

/**
 * Returns the percentage of host physical memory this application has specified for nodes in this cluster,
 * or empty if this is not specified by the application.
 */
public Optional<Integer> getMemoryPercentage() { return Optional.ofNullable(memoryPercentage); }

// Message bus / rpc / http server toggles: setters are public model API,
// the package-private getters are read by the rest of the model.
public final void setMessageBusEnabled(boolean messageBusEnabled) { this.messageBusEnabled = messageBusEnabled; }

boolean messageBusEnabled() { return messageBusEnabled; }

public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; }

boolean rpcServerEnabled() { return rpcServerEnabled; }

boolean httpServerEnabled() { return httpServerEnabled; }

public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; }

@Override
public String toString() { return "container cluster '" + getName() + "'"; }

/**
 * Optional message bus tuning parameters. Each field may be null, meaning
 * "use the config default" (see getConfig(ContainerMbusConfig.Builder)).
 */
public static class MbusParams {
    // a factor, so 1.0 means default
    final Double maxConcurrentFactor;
    // a factor, so 1.0 means default
    final Double documentExpansionFactor;
    // in bytes
    final Integer containerCoreMemory;

    public MbusParams(Double maxConcurrentFactor, Double documentExpansionFactor, Integer containerCoreMemory) {
        this.maxConcurrentFactor = maxConcurrentFactor;
        this.documentExpansionFactor = documentExpansionFactor;
        this.containerCoreMemory = containerCoreMemory;
    }
}

}
// Review note: yes, it should. Fixed.
/**
 * Adds the hosted Vespa test-runner components bundle to this cluster when the
 * application instance being deployed is a tester instance.
 *
 * @param deployState the deploy state for this model build; may be null (the
 *                    constructor explicitly tolerates a null deploy state), in
 *                    which case no bundle is added
 */
private void addTestrunnerComponentsIfTester(DeployState deployState) {
    // Guard against null: the constructor handles deployState == null for the zone,
    // so this method must not NPE on the same input.
    if (deployState == null) return;
    if (deployState.getProperties().applicationId().instance().isTester())
        addPlatformBundle(Paths.get(Defaults.getDefaults().underVespaHome("vespa-testrunner-components-jar-with-dependencies.jar")));
}
addPlatformBundle(Paths.get(Defaults.getDefaults().underVespaHome("vespa-testrunner-components-jar-with-dependencies.jar")));
/**
 * Adds the hosted Vespa test-runner components bundle to this cluster when the
 * application instance being deployed is a tester instance.
 *
 * @param deployState the deploy state for this model build; may be null (the
 *                    constructor explicitly tolerates a null deploy state), in
 *                    which case no bundle is added
 */
private void addTestrunnerComponentsIfTester(DeployState deployState) {
    // Guard against null: the constructor handles deployState == null for the zone,
    // so this method must not NPE on the same input.
    if (deployState == null) return;
    if (deployState.getProperties().applicationId().instance().isTester())
        addPlatformBundle(Paths.get(Defaults.getDefaults().underVespaHome("vespa-testrunner-components-jar-with-dependencies.jar")));
}
class ContainerCluster<CONTAINER extends Container> extends AbstractConfigProducer<AbstractConfigProducer<?>> implements ComponentsConfig.Producer, JdiscBindingsConfig.Producer, DocumentmanagerConfig.Producer, ContainerMbusConfig.Producer, ContainerDocumentConfig.Producer, HealthMonitorConfig.Producer, ApplicationMetadataConfig.Producer, BundlesConfig.Producer, IndexInfoConfig.Producer, IlscriptsConfig.Producer, SchemamappingConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, PageTemplatesConfig.Producer, SemanticRulesConfig.Producer, DocprocConfig.Producer, ClusterInfoConfig.Producer, RoutingProviderConfig.Producer, ConfigserverConfig.Producer { /** * URI prefix used for internal, usually programmatic, APIs. URIs using this * prefix should never considered available for direct use by customers, and * normal compatibility concerns only applies to libraries using the URIs in * question, not contents served from the URIs themselves. */ public static final String RESERVED_URI_PREFIX = "reserved-for-internal-use"; public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final String STATISTICS_HANDLER_CLASS = "com.yahoo.container.config.StatisticsRequestHandler"; public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider"; public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; public static final String ROOT_HANDLER_BINDING = "*: private final String name; 
protected List<CONTAINER> containers = new ArrayList<>(); private Http http; private ProcessingChains processingChains; private ContainerSearch containerSearch; private ContainerDocproc containerDocproc; private ContainerDocumentApi containerDocumentApi; private SecretStore secretStore; private MbusParams mbusParams; private boolean messageBusEnabled = true; private boolean rpcServerEnabled = true; private boolean httpServerEnabled = true; private final Set<Path> platformBundles = new LinkedHashSet<>(); private final List<String> serviceAliases = new ArrayList<>(); private final List<String> endpointAliases = new ArrayList<>(); private final ComponentGroup<Component<?, ?>> componentGroup; private final boolean isHostedVespa; private Map<String, String> concreteDocumentTypes = new LinkedHashMap<>(); private ApplicationMetaData applicationMetaData = null; /** The zone this is deployed in, or the default zone if not on hosted Vespa */ private Zone zone; private String hostClusterId = null; private String jvmGCOptions = null; private String environmentVars = null; private Integer memoryPercentage = null; public ContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) { super(parent, subId); this.name = name; this.isHostedVespa = stateIsHosted(deployState); this.zone = (deployState != null) ? 
deployState.zone() : Zone.defaultZone(); componentGroup = new ComponentGroup<>(this, "component"); addComponent(new StatisticsComponent()); addSimpleComponent(AccessLog.class); addSimpleComponent(ThreadPoolProvider.class); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class); addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor"); addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory"); addSimpleComponent("com.yahoo.container.handler.VipStatus"); addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName()); addJaxProviders(); addTestrunnerComponentsIfTester(deployState); } public void setZone(Zone zone) { this.zone = zone; } public Zone getZone() { return zone; } public void addDefaultHandlersWithVip() { addDefaultHandlersExceptStatus(); addVipHandler(); } public final void addDefaultHandlersExceptStatus() { addDefaultRootHandler(); addMetricStateHandler(); addApplicationStatusHandler(); } public void addMetricStateHandler() { Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>( new ComponentModel(STATE_HANDLER_CLASS, null, null, null)); stateHandler.addServerBindings("http: "http: addComponent(stateHandler); } public void addDefaultRootHandler() { if (hasHandlerWithBinding(ROOT_HANDLER_BINDING)) return; Handler<AbstractConfigProducer<?>> handler = new Handler<>( new 
ComponentModel(BundleInstantiationSpecification.getFromStrings( BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null)); handler.addServerBindings(ROOT_HANDLER_BINDING); addComponent(handler); } private boolean hasHandlerWithBinding(String binding) { Collection<Handler<?>> handlers = getHandlers(); for (Handler handler : handlers) { if (handler.getServerBindings().contains(binding)) return true; } return false; } public void addApplicationStatusHandler() { Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( APPLICATION_STATUS_HANDLER_CLASS, null), null)); statusHandler.addServerBindings("http: addComponent(statusHandler); } public void addVipHandler() { Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS); vipHandler.addServerBindings("http: addComponent(vipHandler); } @SuppressWarnings("deprecation") private void addJaxProviders() { addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class); } public final void addComponent(Component<?, ?> component) { componentGroup.addComponent(component); } public final void addSimpleComponent(String idSpec, String classSpec, String bundleSpec) { addComponent(new SimpleComponent(new 
ComponentModel(idSpec, classSpec, bundleSpec))); } /** * Removes a component by id * * @return the removed component, or null if it was not present */ public Component removeComponent(ComponentId componentId) { return componentGroup.removeComponent(componentId); } private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } protected void addSimpleComponent(String className) { addComponent(new SimpleComponent(className)); } public void prepare(DeployState deployState) { applicationMetaData = deployState.getApplicationPackage().getMetaData(); doPrepare(deployState); } protected abstract void doPrepare(DeployState deployState); public void addMbusServer(ComponentId chainId) { ComponentId serviceId = chainId.nestInNamespace(ComponentId.fromString("MbusServer")); addComponent( new Component<>(new ComponentModel(new BundleInstantiationSpecification( serviceId, ComponentSpecification.fromString(MbusServerProvider.class.getName()), null)))); } public String getName() { return name; } public List<CONTAINER> getContainers() { return Collections.unmodifiableList(containers); } public void addContainer(CONTAINER container) { container.setClusterName(name); container.setProp("clustername", name) .setProp("index", this.containers.size()); containers.add(container); } public void addContainers(Collection<CONTAINER> containers) { containers.forEach(this::addContainer); } public void setProcessingChains(ProcessingChains processingChains, String... 
serverBindings) { if (this.processingChains != null) throw new IllegalStateException("ProcessingChains should only be set once."); this.processingChains = processingChains; ProcessingHandler<?> processingHandler = new ProcessingHandler<>( processingChains, "com.yahoo.processing.handler.ProcessingHandler"); for (String binding: serverBindings) processingHandler.addServerBindings(binding); addComponent(processingHandler); } ProcessingChains getProcessingChains() { return processingChains; } @NonNull public SearchChains getSearchChains() { if (containerSearch == null) throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + "': Add <search/> to the cluster in services.xml"); return containerSearch.getChains(); } @Nullable public ContainerSearch getSearch() { return containerSearch; } public void setSearch(ContainerSearch containerSearch) { this.containerSearch = containerSearch; } public void setHttp(Http http) { this.http = http; addChild(http); } @Nullable public Http getHttp() { return http; } @Nullable public ContainerDocproc getDocproc() { return containerDocproc; } public void setDocproc(ContainerDocproc containerDocproc) { this.containerDocproc = containerDocproc; } @Nullable public ContainerDocumentApi getDocumentApi() { return containerDocumentApi; } public void setDocumentApi(ContainerDocumentApi containerDocumentApi) { this.containerDocumentApi = containerDocumentApi; } @NonNull public DocprocChains getDocprocChains() { if (containerDocproc == null) throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() + "': Add <document-processing/> to the cluster in services.xml"); return containerDocproc.getChains(); } @SuppressWarnings("unchecked") public Collection<Handler<?>> getHandlers() { return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class); } public void setSecretStore(SecretStore secretStore) { this.secretStore = secretStore; } 
/** Returns the secret store configured for this cluster, if any. */
public Optional<SecretStore> getSecretStore() { return Optional.ofNullable(secretStore); }

/** Returns the components registered directly on this cluster's component group, keyed by id. */
public Map<ComponentId, Component<?, ?>> getComponentsMap() { return componentGroup.getComponentMap(); }

/** Returns all components in this cluster (generic, handlers, chained) */
public Collection<Component<?, ?>> getAllComponents() {
    List<Component<?, ?>> allComponents = new ArrayList<>();
    recursivelyFindAllComponents(allComponents, this);
    Collections.sort(allComponents);
    // Unmodifiable view: callers must not mutate the cluster's component set through this.
    return Collections.unmodifiableCollection(allComponents);
}

// Walks the config-producer tree below 'current', collecting every Component into
// 'allComponents'. Deliberately does not descend into Container children — components
// owned by an individual container node are not cluster-level components.
private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) {
    for (AbstractConfigProducer<?> child: current.getChildren().values()) {
        if (child instanceof Component)
            allComponents.add((Component<?, ?>) child);
        if (!(child instanceof Container))
            recursivelyFindAllComponents(allComponents, child);
    }
}

// Emits all cluster components, plus the RegistriesHack marker component required by
// the container's DI bootstrapping (see the id string below).
@Override
public void getConfig(ComponentsConfig.Builder builder) {
    builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents()));
    builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack"));
}

// Emits the server bindings of all handlers in this cluster.
@Override
public void getConfig(JdiscBindingsConfig.Builder builder) {
    builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers()));
}

// Document compression is only enabled when docproc is present and asks for it.
@Override
public void getConfig(DocumentmanagerConfig.Builder builder) {
    if (containerDocproc != null && containerDocproc.isCompressDocuments())
        builder.enablecompression(true);
}

// One doctype entry per concrete document type: type name -> factory component id.
@Override
public void getConfig(ContainerDocumentConfig.Builder builder) {
    for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) {
        ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder();
        dtb.type(e.getKey());
        dtb.factorycomponent(e.getValue());
        builder.doctype(dtb);
    }
}

// Snapshot interval comes from the monitoring service when one is configured;
// otherwise the config default applies.
@Override
public void getConfig(HealthMonitorConfig.Builder builder) {
    Monitoring monitoring = getMonitoringService();
    if (monitoring != null) {
        builder.snapshot_interval(monitoring.getIntervalSeconds());
    }
}

// Application metadata is only available after prepare() has run; before that,
// applicationMetaData is null and the builder's defaults are left in place.
@Override
public void getConfig(ApplicationMetadataConfig.Builder builder) {
    if (applicationMetaData != null) {
        builder.name(applicationMetaData.getApplicationName()).
                user(applicationMetaData.getDeployedByUser()).
                path(applicationMetaData.getDeployPath()).
                timestamp(applicationMetaData.getDeployTimestamp()).
                checksum(applicationMetaData.getCheckSum()).
                generation(applicationMetaData.getGeneration());
    }
}

/**
 * Adds a bundle present at a known location at the target container nodes.
 *
 * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar'
 */
public final void addPlatformBundle(Path bundlePath) {
    platformBundles.add(bundlePath);
}

// Publishes each platform bundle as a disk-prefixed file reference.
@Override
public void getConfig(BundlesConfig.Builder builder) {
    platformBundles.stream()
            .map(ContainerCluster::toFileReferenceString)
            .forEach(builder::bundle);
}

private static String toFileReferenceString(Path path) {
    return DISK_BUNDLE_PREFIX + path.toString();
}

@Override
public void getConfig(QrSearchersConfig.Builder builder) {
    if (containerSearch != null) containerSearch.getConfig(builder);
}

// JVM settings: an application-specified heap percentage wins; otherwise hosted
// clusters get 17% when co-located on a content cluster's nodes, 60% when standalone.
// GC options fall back to G1GC when not set by the application.
@Override
public void getConfig(QrStartConfig.Builder builder) {
    QrStartConfig.Jvm.Builder jvmBuilder = builder.jvm;
    if (getMemoryPercentage().isPresent()) {
        jvmBuilder.heapSizeAsPercentageOfPhysicalMemory(getMemoryPercentage().get());
    } else if (isHostedVespa()) {
        jvmBuilder.heapSizeAsPercentageOfPhysicalMemory(getHostClusterId().isPresent() ? 17 : 60);
    }
    jvmBuilder.gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC));
    if (environmentVars != null) {
        builder.qrs.env(environmentVars);
    }
}

// The following producers simply delegate to the docproc/search sub-model when present.
@Override
public void getConfig(DocprocConfig.Builder builder) {
    if (containerDocproc != null) containerDocproc.getConfig(builder);
}

@Override
public void getConfig(PageTemplatesConfig.Builder builder) {
    if (containerSearch != null) containerSearch.getConfig(builder);
}

@Override
public void getConfig(SemanticRulesConfig.Builder builder) {
    if (containerSearch != null) containerSearch.getConfig(builder);
}

@Override
public void getConfig(QueryProfilesConfig.Builder builder) {
    if (containerSearch != null) containerSearch.getConfig(builder);
}

@Override
public void getConfig(SchemamappingConfig.Builder builder) {
    if (containerDocproc != null) containerDocproc.getConfig(builder);
}

@Override
public void getConfig(IndexInfoConfig.Builder builder) {
    if (containerSearch != null) containerSearch.getConfig(builder);
}

// Message bus tuning: each MbusParams field overrides the config default only when
// non-null; docproc may contribute additional settings on top.
@Override
public void getConfig(ContainerMbusConfig.Builder builder) {
    if (mbusParams != null) {
        if (mbusParams.maxConcurrentFactor != null)
            builder.maxConcurrentFactor(mbusParams.maxConcurrentFactor);
        if (mbusParams.documentExpansionFactor != null)
            builder.documentExpansionFactor(mbusParams.documentExpansionFactor);
        if (mbusParams.containerCoreMemory != null)
            builder.containerCoreMemory(mbusParams.containerCoreMemory);
    }
    if (containerDocproc != null) containerDocproc.getConfig(builder);
}

public void setMbusParams(MbusParams mbusParams) { this.mbusParams = mbusParams; }

/** Connects this cluster's search component (if any) to the given search clusters. */
public void initialize(Map<String, AbstractSearchCluster> clusterMap) {
    if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap);
}

/** Adds the default JSON access log component for this cluster. */
public void addDefaultSearchAccessLog() {
    addComponent(new AccessLogComponent(AccessLogComponent.AccessLogType.jsonAccessLog, getName(), isHostedVespa));
}

// Aggregates indexing-language scripts from every search cluster in the model,
// not just this container cluster's own search component.
@Override
public void getConfig(IlscriptsConfig.Builder builder) {
    List<AbstractSearchCluster> searchClusters = new ArrayList<>();
    searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo()));
    for (AbstractSearchCluster searchCluster : searchClusters) {
        searchCluster.getConfig(builder);
    }
}

// Describes this cluster: one entry per descendant service, with its index
// (default "99999" when unset), hostname and ports.
@Override
public void getConfig(ClusterInfoConfig.Builder builder) {
    builder.clusterId(name);
    builder.nodeCount(containers.size());
    for (Service service : getDescendantServices()) {
        builder.services.add(new ClusterInfoConfig.Services.Builder()
                .index(Integer.parseInt(service.getServicePropertyString("index", "99999")))
                .hostname(service.getHostName())
                .ports(getPorts(service)));
    }
}

/**
 * Returns a config server config containing the right zone settings (and defaults for the rest).
 * This is useful to allow applications to find out in which zone they are running by having the Zone
 * object (which is constructed from this config) injected.
 */
@Override
public void getConfig(ConfigserverConfig.Builder builder) {
    builder.system(zone.system().value());
    builder.environment(zone.environment().value());
    builder.region(zone.region().value());
}

// Builds one Ports entry per port of the service, tagged via the service's port metadata.
private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) {
    List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>();
    PortsMeta portsMeta = service.getPortsMeta();
    for (int i = 0; i < portsMeta.getNumPorts(); i++) {
        builders.add(new ClusterInfoConfig.Services.Ports.Builder()
                .number(service.getRelativePort(i))
                .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i))
        );
    }
    return builders;
}

public boolean isHostedVespa() { return isHostedVespa; }

@Override
public void getConfig(RoutingProviderConfig.Builder builder) { builder.enabled(isHostedVespa); }

// NOTE(review): exposes the internal mutable map directly — callers can mutate
// cluster state through it; confirm whether this is relied upon before wrapping.
public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; }

/** The configured service aliases for the service in this cluster */
public List<String> serviceAliases() { return serviceAliases; }

/** The configured endpoint aliases (fqdn) for the service in this cluster */
public List<String> endpointAliases() { return endpointAliases; }
/** Sets the id of the content cluster which hosts this container cluster (hosted Vespa only). */
public void setHostClusterId(String clusterId) { hostClusterId = clusterId; }

/**
 * Returns the id of the content cluster which hosts this container cluster, if any.
 * This is only set with hosted clusters where this container cluster is set up to run on the nodes
 * of a content cluster.
 */
public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); }

/** Sets the application-specified heap percentage; null clears it (defaults apply). */
public void setMemoryPercentage(Integer memoryPercentage) { this.memoryPercentage = memoryPercentage; }

/** Sets the JVM GC options string; null means the default (G1GC) is used. */
public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; }

/** Sets the environment variable string passed to the qrs start config; null means none. */
public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; }

public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); }

/**
 * Returns the percentage of host physical memory this application has specified for nodes in this cluster,
 * or empty if this is not specified by the application.
 */
public Optional<Integer> getMemoryPercentage() { return Optional.ofNullable(memoryPercentage); }

// Message bus / rpc / http server toggles: setters are public model API,
// the package-private getters are read by the rest of the model.
public final void setMessageBusEnabled(boolean messageBusEnabled) { this.messageBusEnabled = messageBusEnabled; }

boolean messageBusEnabled() { return messageBusEnabled; }

public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; }

boolean rpcServerEnabled() { return rpcServerEnabled; }

boolean httpServerEnabled() { return httpServerEnabled; }

public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; }

@Override
public String toString() { return "container cluster '" + getName() + "'"; }

/**
 * Optional message bus tuning parameters. Each field may be null, meaning
 * "use the config default" (see getConfig(ContainerMbusConfig.Builder)).
 */
public static class MbusParams {
    // a factor, so 1.0 means default
    final Double maxConcurrentFactor;
    // a factor, so 1.0 means default
    final Double documentExpansionFactor;
    // in bytes
    final Integer containerCoreMemory;

    public MbusParams(Double maxConcurrentFactor, Double documentExpansionFactor, Integer containerCoreMemory) {
        this.maxConcurrentFactor = maxConcurrentFactor;
        this.documentExpansionFactor = documentExpansionFactor;
        this.containerCoreMemory = containerCoreMemory;
    }
}

}
/**
 * NOTE(review): This class body appears to have been mangled by text extraction:
 * several string literals are truncated and unterminated (e.g. the server-binding
 * URIs after "http:" and the ROOT_HANDLER_BINDING value "*:), and the class declares
 * an abstract method (doPrepare) without itself being visibly marked abstract.
 * Presumably the original compiled source has complete binding strings and an
 * abstract class declaration — restore from upstream before compiling; only this
 * comment is added here, every original byte below is preserved. TODO confirm
 * against the original source.
 */
class ContainerCluster<CONTAINER extends Container> extends AbstractConfigProducer<AbstractConfigProducer<?>> implements ComponentsConfig.Producer, JdiscBindingsConfig.Producer, DocumentmanagerConfig.Producer, ContainerMbusConfig.Producer, ContainerDocumentConfig.Producer, HealthMonitorConfig.Producer, ApplicationMetadataConfig.Producer, BundlesConfig.Producer, IndexInfoConfig.Producer, IlscriptsConfig.Producer, SchemamappingConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, PageTemplatesConfig.Producer, SemanticRulesConfig.Producer, DocprocConfig.Producer, ClusterInfoConfig.Producer, RoutingProviderConfig.Producer, ConfigserverConfig.Producer { /** * URI prefix used for internal, usually programmatic, APIs. URIs using this * prefix should never considered available for direct use by customers, and * normal compatibility concerns only applies to libraries using the URIs in * question, not contents served from the URIs themselves. */ public static final String RESERVED_URI_PREFIX = "reserved-for-internal-use"; public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final String STATISTICS_HANDLER_CLASS = "com.yahoo.container.config.StatisticsRequestHandler"; public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider"; public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; public static final String ROOT_HANDLER_BINDING = "*: private final String name; 
protected List<CONTAINER> containers = new ArrayList<>(); private Http http; private ProcessingChains processingChains; private ContainerSearch containerSearch; private ContainerDocproc containerDocproc; private ContainerDocumentApi containerDocumentApi; private SecretStore secretStore; private MbusParams mbusParams; private boolean messageBusEnabled = true; private boolean rpcServerEnabled = true; private boolean httpServerEnabled = true; private final Set<Path> platformBundles = new LinkedHashSet<>(); private final List<String> serviceAliases = new ArrayList<>(); private final List<String> endpointAliases = new ArrayList<>(); private final ComponentGroup<Component<?, ?>> componentGroup; private final boolean isHostedVespa; private Map<String, String> concreteDocumentTypes = new LinkedHashMap<>(); private ApplicationMetaData applicationMetaData = null; /** The zone this is deployed in, or the default zone if not on hosted Vespa */ private Zone zone; private String hostClusterId = null; private String jvmGCOptions = null; private String environmentVars = null; private Integer memoryPercentage = null; public ContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) { super(parent, subId); this.name = name; this.isHostedVespa = stateIsHosted(deployState); this.zone = (deployState != null) ? 
deployState.zone() : Zone.defaultZone(); componentGroup = new ComponentGroup<>(this, "component"); addComponent(new StatisticsComponent()); addSimpleComponent(AccessLog.class); addSimpleComponent(ThreadPoolProvider.class); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class); addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor"); addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory"); addSimpleComponent("com.yahoo.container.handler.VipStatus"); addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName()); addJaxProviders(); addTestrunnerComponentsIfTester(deployState); } public void setZone(Zone zone) { this.zone = zone; } public Zone getZone() { return zone; } public void addDefaultHandlersWithVip() { addDefaultHandlersExceptStatus(); addVipHandler(); } public final void addDefaultHandlersExceptStatus() { addDefaultRootHandler(); addMetricStateHandler(); addApplicationStatusHandler(); } public void addMetricStateHandler() { Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>( new ComponentModel(STATE_HANDLER_CLASS, null, null, null)); stateHandler.addServerBindings("http: "http: addComponent(stateHandler); } public void addDefaultRootHandler() { if (hasHandlerWithBinding(ROOT_HANDLER_BINDING)) return; Handler<AbstractConfigProducer<?>> handler = new Handler<>( new 
ComponentModel(BundleInstantiationSpecification.getFromStrings( BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null)); handler.addServerBindings(ROOT_HANDLER_BINDING); addComponent(handler); } private boolean hasHandlerWithBinding(String binding) { Collection<Handler<?>> handlers = getHandlers(); for (Handler handler : handlers) { if (handler.getServerBindings().contains(binding)) return true; } return false; } public void addApplicationStatusHandler() { Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( APPLICATION_STATUS_HANDLER_CLASS, null), null)); statusHandler.addServerBindings("http: addComponent(statusHandler); } public void addVipHandler() { Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS); vipHandler.addServerBindings("http: addComponent(vipHandler); } @SuppressWarnings("deprecation") private void addJaxProviders() { addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class); } public final void addComponent(Component<?, ?> component) { componentGroup.addComponent(component); } public final void addSimpleComponent(String idSpec, String classSpec, String bundleSpec) { addComponent(new SimpleComponent(new 
ComponentModel(idSpec, classSpec, bundleSpec))); } /** * Removes a component by id * * @return the removed component, or null if it was not present */ public Component removeComponent(ComponentId componentId) { return componentGroup.removeComponent(componentId); } private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } protected void addSimpleComponent(String className) { addComponent(new SimpleComponent(className)); } public void prepare(DeployState deployState) { applicationMetaData = deployState.getApplicationPackage().getMetaData(); doPrepare(deployState); } protected abstract void doPrepare(DeployState deployState); public void addMbusServer(ComponentId chainId) { ComponentId serviceId = chainId.nestInNamespace(ComponentId.fromString("MbusServer")); addComponent( new Component<>(new ComponentModel(new BundleInstantiationSpecification( serviceId, ComponentSpecification.fromString(MbusServerProvider.class.getName()), null)))); } public String getName() { return name; } public List<CONTAINER> getContainers() { return Collections.unmodifiableList(containers); } public void addContainer(CONTAINER container) { container.setClusterName(name); container.setProp("clustername", name) .setProp("index", this.containers.size()); containers.add(container); } public void addContainers(Collection<CONTAINER> containers) { containers.forEach(this::addContainer); } public void setProcessingChains(ProcessingChains processingChains, String... 
serverBindings) { if (this.processingChains != null) throw new IllegalStateException("ProcessingChains should only be set once."); this.processingChains = processingChains; ProcessingHandler<?> processingHandler = new ProcessingHandler<>( processingChains, "com.yahoo.processing.handler.ProcessingHandler"); for (String binding: serverBindings) processingHandler.addServerBindings(binding); addComponent(processingHandler); } ProcessingChains getProcessingChains() { return processingChains; } @NonNull public SearchChains getSearchChains() { if (containerSearch == null) throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + "': Add <search/> to the cluster in services.xml"); return containerSearch.getChains(); } @Nullable public ContainerSearch getSearch() { return containerSearch; } public void setSearch(ContainerSearch containerSearch) { this.containerSearch = containerSearch; } public void setHttp(Http http) { this.http = http; addChild(http); } @Nullable public Http getHttp() { return http; } @Nullable public ContainerDocproc getDocproc() { return containerDocproc; } public void setDocproc(ContainerDocproc containerDocproc) { this.containerDocproc = containerDocproc; } @Nullable public ContainerDocumentApi getDocumentApi() { return containerDocumentApi; } public void setDocumentApi(ContainerDocumentApi containerDocumentApi) { this.containerDocumentApi = containerDocumentApi; } @NonNull public DocprocChains getDocprocChains() { if (containerDocproc == null) throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() + "': Add <document-processing/> to the cluster in services.xml"); return containerDocproc.getChains(); } @SuppressWarnings("unchecked") public Collection<Handler<?>> getHandlers() { return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class); } public void setSecretStore(SecretStore secretStore) { this.secretStore = secretStore; } 
public Optional<SecretStore> getSecretStore() { return Optional.ofNullable(secretStore); } public Map<ComponentId, Component<?, ?>> getComponentsMap() { return componentGroup.getComponentMap(); } /** Returns all components in this cluster (generic, handlers, chained) */ public Collection<Component<?, ?>> getAllComponents() { List<Component<?, ?>> allComponents = new ArrayList<>(); recursivelyFindAllComponents(allComponents, this); Collections.sort(allComponents); return Collections.unmodifiableCollection(allComponents); } private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) { for (AbstractConfigProducer<?> child: current.getChildren().values()) { if (child instanceof Component) allComponents.add((Component<?, ?>) child); if (!(child instanceof Container)) recursivelyFindAllComponents(allComponents, child); } } @Override public void getConfig(ComponentsConfig.Builder builder) { builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents())); builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack")); } @Override public void getConfig(JdiscBindingsConfig.Builder builder) { builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers())); } @Override public void getConfig(DocumentmanagerConfig.Builder builder) { if (containerDocproc != null && containerDocproc.isCompressDocuments()) builder.enablecompression(true); } @Override public void getConfig(ContainerDocumentConfig.Builder builder) { for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) { ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder(); dtb.type(e.getKey()); dtb.factorycomponent(e.getValue()); builder.doctype(dtb); } } @Override public void getConfig(HealthMonitorConfig.Builder builder) { Monitoring monitoring = getMonitoringService(); if (monitoring != null) { 
builder.snapshot_interval(monitoring.getIntervalSeconds()); } } @Override public void getConfig(ApplicationMetadataConfig.Builder builder) { if (applicationMetaData != null) { builder.name(applicationMetaData.getApplicationName()). user(applicationMetaData.getDeployedByUser()). path(applicationMetaData.getDeployPath()). timestamp(applicationMetaData.getDeployTimestamp()). checksum(applicationMetaData.getCheckSum()). generation(applicationMetaData.getGeneration()); } } /** * Adds a bundle present at a known location at the target container nodes. * * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar' */ public final void addPlatformBundle(Path bundlePath) { platformBundles.add(bundlePath); } @Override public void getConfig(BundlesConfig.Builder builder) { platformBundles.stream() .map(ContainerCluster::toFileReferenceString) .forEach(builder::bundle); } private static String toFileReferenceString(Path path) { return DISK_BUNDLE_PREFIX + path.toString(); } @Override public void getConfig(QrSearchersConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder builder) { QrStartConfig.Jvm.Builder jvmBuilder = builder.jvm; if (getMemoryPercentage().isPresent()) { jvmBuilder.heapSizeAsPercentageOfPhysicalMemory(getMemoryPercentage().get()); } else if (isHostedVespa()) { jvmBuilder.heapSizeAsPercentageOfPhysicalMemory(getHostClusterId().isPresent() ? 
17 : 60); } jvmBuilder.gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC)); if (environmentVars != null) { builder.qrs.env(environmentVars); } } @Override public void getConfig(DocprocConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SchemamappingConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(ContainerMbusConfig.Builder builder) { if (mbusParams != null) { if (mbusParams.maxConcurrentFactor != null) builder.maxConcurrentFactor(mbusParams.maxConcurrentFactor); if (mbusParams.documentExpansionFactor != null) builder.documentExpansionFactor(mbusParams.documentExpansionFactor); if (mbusParams.containerCoreMemory != null) builder.containerCoreMemory(mbusParams.containerCoreMemory); } if (containerDocproc != null) containerDocproc.getConfig(builder); } public void setMbusParams(MbusParams mbusParams) { this.mbusParams = mbusParams; } public void initialize(Map<String, AbstractSearchCluster> clusterMap) { if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap); } public void addDefaultSearchAccessLog() { addComponent(new AccessLogComponent(AccessLogComponent.AccessLogType.jsonAccessLog, getName(), isHostedVespa)); } @Override public void getConfig(IlscriptsConfig.Builder builder) { List<AbstractSearchCluster> searchClusters = new ArrayList<>(); 
searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo())); for (AbstractSearchCluster searchCluster : searchClusters) { searchCluster.getConfig(builder); } } @Override public void getConfig(ClusterInfoConfig.Builder builder) { builder.clusterId(name); builder.nodeCount(containers.size()); for (Service service : getDescendantServices()) { builder.services.add(new ClusterInfoConfig.Services.Builder() .index(Integer.parseInt(service.getServicePropertyString("index", "99999"))) .hostname(service.getHostName()) .ports(getPorts(service))); } } /** * Returns a config server config containing the right zone settings (and defaults for the rest). * This is useful to allow applications to find out in which zone they are runnung by having the Zone * object (which is constructed from this config) injected. */ @Override public void getConfig(ConfigserverConfig.Builder builder) { builder.system(zone.system().value()); builder.environment(zone.environment().value()); builder.region(zone.region().value()); } private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) { List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>(); PortsMeta portsMeta = service.getPortsMeta(); for (int i = 0; i < portsMeta.getNumPorts(); i++) { builders.add(new ClusterInfoConfig.Services.Ports.Builder() .number(service.getRelativePort(i)) .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i)) ); } return builders; } public boolean isHostedVespa() { return isHostedVespa; } @Override public void getConfig(RoutingProviderConfig.Builder builder) { builder.enabled(isHostedVespa); } public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; } /** The configured service aliases for the service in this cluster */ public List<String> serviceAliases() { return serviceAliases; } /** The configured endpoint aliases (fqdn) for the service in this cluster */ public List<String> endpointAliases() { return endpointAliases; } 
public void setHostClusterId(String clusterId) { hostClusterId = clusterId; } /** * Returns the id of the content cluster which hosts this container cluster, if any. * This is only set with hosted clusters where this container cluster is set up to run on the nodes * of a content cluster. */ public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); } public void setMemoryPercentage(Integer memoryPercentage) { this.memoryPercentage = memoryPercentage; } public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; } public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; } public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); } /** * Returns the percentage of host physical memory this application has specified for nodes in this cluster, * or empty if this is not specified by the application. */ public Optional<Integer> getMemoryPercentage() { return Optional.ofNullable(memoryPercentage); } public final void setMessageBusEnabled(boolean messageBusEnabled) { this.messageBusEnabled = messageBusEnabled; } boolean messageBusEnabled() { return messageBusEnabled; } public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; } boolean rpcServerEnabled() { return rpcServerEnabled; } boolean httpServerEnabled() { return httpServerEnabled; } public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; } @Override public String toString() { return "container cluster '" + getName() + "'"; } public static class MbusParams { final Double maxConcurrentFactor; final Double documentExpansionFactor; final Integer containerCoreMemory; public MbusParams(Double maxConcurrentFactor, Double documentExpansionFactor, Integer containerCoreMemory) { this.maxConcurrentFactor = maxConcurrentFactor; this.documentExpansionFactor = documentExpansionFactor; this.containerCoreMemory = 
containerCoreMemory; } } }
Copied the old string verbatim to be certain the path was correct and free of typos, but overlooked that the prefix is defined elsewhere.
/**
 * Registers the test-runner components jar as a platform bundle for this cluster,
 * but only when the deploying application instance is a tester instance; does
 * nothing for ordinary instances.
 */
private void addTestrunnerComponentsIfTester(DeployState deployState) {
    boolean isTesterInstance = deployState.getProperties().applicationId().instance().isTester();
    if ( ! isTesterInstance) return;
    // Jar shipped with the Vespa installation; resolved relative to the Vespa home directory.
    String testrunnerJar = Defaults.getDefaults().underVespaHome("vespa-testrunner-components-jar-with-dependencies.jar");
    addPlatformBundle(Paths.get(testrunnerJar));
}
addPlatformBundle(Paths.get(Defaults.getDefaults().underVespaHome("vespa-testrunner-components-jar-with-dependencies.jar")));
/**
 * Registers the test-runner components jar (resolved under the Vespa home directory)
 * as a platform bundle for this cluster, but only when the deploying application
 * instance is a tester instance; no-op otherwise.
 */
private void addTestrunnerComponentsIfTester(DeployState deployState) { if (deployState.getProperties().applicationId().instance().isTester()) addPlatformBundle(Paths.get(Defaults.getDefaults().underVespaHome("vespa-testrunner-components-jar-with-dependencies.jar"))); }
class ContainerCluster<CONTAINER extends Container> extends AbstractConfigProducer<AbstractConfigProducer<?>> implements ComponentsConfig.Producer, JdiscBindingsConfig.Producer, DocumentmanagerConfig.Producer, ContainerMbusConfig.Producer, ContainerDocumentConfig.Producer, HealthMonitorConfig.Producer, ApplicationMetadataConfig.Producer, BundlesConfig.Producer, IndexInfoConfig.Producer, IlscriptsConfig.Producer, SchemamappingConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, PageTemplatesConfig.Producer, SemanticRulesConfig.Producer, DocprocConfig.Producer, ClusterInfoConfig.Producer, RoutingProviderConfig.Producer, ConfigserverConfig.Producer { /** * URI prefix used for internal, usually programmatic, APIs. URIs using this * prefix should never considered available for direct use by customers, and * normal compatibility concerns only applies to libraries using the URIs in * question, not contents served from the URIs themselves. */ public static final String RESERVED_URI_PREFIX = "reserved-for-internal-use"; public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final String STATISTICS_HANDLER_CLASS = "com.yahoo.container.config.StatisticsRequestHandler"; public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider"; public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; public static final String ROOT_HANDLER_BINDING = "*: private final String name; 
protected List<CONTAINER> containers = new ArrayList<>(); private Http http; private ProcessingChains processingChains; private ContainerSearch containerSearch; private ContainerDocproc containerDocproc; private ContainerDocumentApi containerDocumentApi; private SecretStore secretStore; private MbusParams mbusParams; private boolean messageBusEnabled = true; private boolean rpcServerEnabled = true; private boolean httpServerEnabled = true; private final Set<Path> platformBundles = new LinkedHashSet<>(); private final List<String> serviceAliases = new ArrayList<>(); private final List<String> endpointAliases = new ArrayList<>(); private final ComponentGroup<Component<?, ?>> componentGroup; private final boolean isHostedVespa; private Map<String, String> concreteDocumentTypes = new LinkedHashMap<>(); private ApplicationMetaData applicationMetaData = null; /** The zone this is deployed in, or the default zone if not on hosted Vespa */ private Zone zone; private String hostClusterId = null; private String jvmGCOptions = null; private String environmentVars = null; private Integer memoryPercentage = null; public ContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) { super(parent, subId); this.name = name; this.isHostedVespa = stateIsHosted(deployState); this.zone = (deployState != null) ? 
deployState.zone() : Zone.defaultZone(); componentGroup = new ComponentGroup<>(this, "component"); addComponent(new StatisticsComponent()); addSimpleComponent(AccessLog.class); addSimpleComponent(ThreadPoolProvider.class); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class); addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor"); addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory"); addSimpleComponent("com.yahoo.container.handler.VipStatus"); addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName()); addJaxProviders(); addTestrunnerComponentsIfTester(deployState); } public void setZone(Zone zone) { this.zone = zone; } public Zone getZone() { return zone; } public void addDefaultHandlersWithVip() { addDefaultHandlersExceptStatus(); addVipHandler(); } public final void addDefaultHandlersExceptStatus() { addDefaultRootHandler(); addMetricStateHandler(); addApplicationStatusHandler(); } public void addMetricStateHandler() { Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>( new ComponentModel(STATE_HANDLER_CLASS, null, null, null)); stateHandler.addServerBindings("http: "http: addComponent(stateHandler); } public void addDefaultRootHandler() { if (hasHandlerWithBinding(ROOT_HANDLER_BINDING)) return; Handler<AbstractConfigProducer<?>> handler = new Handler<>( new 
ComponentModel(BundleInstantiationSpecification.getFromStrings( BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null)); handler.addServerBindings(ROOT_HANDLER_BINDING); addComponent(handler); } private boolean hasHandlerWithBinding(String binding) { Collection<Handler<?>> handlers = getHandlers(); for (Handler handler : handlers) { if (handler.getServerBindings().contains(binding)) return true; } return false; } public void addApplicationStatusHandler() { Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( APPLICATION_STATUS_HANDLER_CLASS, null), null)); statusHandler.addServerBindings("http: addComponent(statusHandler); } public void addVipHandler() { Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS); vipHandler.addServerBindings("http: addComponent(vipHandler); } @SuppressWarnings("deprecation") private void addJaxProviders() { addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class); } public final void addComponent(Component<?, ?> component) { componentGroup.addComponent(component); } public final void addSimpleComponent(String idSpec, String classSpec, String bundleSpec) { addComponent(new SimpleComponent(new 
ComponentModel(idSpec, classSpec, bundleSpec))); } /** * Removes a component by id * * @return the removed component, or null if it was not present */ public Component removeComponent(ComponentId componentId) { return componentGroup.removeComponent(componentId); } private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } protected void addSimpleComponent(String className) { addComponent(new SimpleComponent(className)); } public void prepare(DeployState deployState) { applicationMetaData = deployState.getApplicationPackage().getMetaData(); doPrepare(deployState); } protected abstract void doPrepare(DeployState deployState); public void addMbusServer(ComponentId chainId) { ComponentId serviceId = chainId.nestInNamespace(ComponentId.fromString("MbusServer")); addComponent( new Component<>(new ComponentModel(new BundleInstantiationSpecification( serviceId, ComponentSpecification.fromString(MbusServerProvider.class.getName()), null)))); } public String getName() { return name; } public List<CONTAINER> getContainers() { return Collections.unmodifiableList(containers); } public void addContainer(CONTAINER container) { container.setClusterName(name); container.setProp("clustername", name) .setProp("index", this.containers.size()); containers.add(container); } public void addContainers(Collection<CONTAINER> containers) { containers.forEach(this::addContainer); } public void setProcessingChains(ProcessingChains processingChains, String... 
serverBindings) { if (this.processingChains != null) throw new IllegalStateException("ProcessingChains should only be set once."); this.processingChains = processingChains; ProcessingHandler<?> processingHandler = new ProcessingHandler<>( processingChains, "com.yahoo.processing.handler.ProcessingHandler"); for (String binding: serverBindings) processingHandler.addServerBindings(binding); addComponent(processingHandler); } ProcessingChains getProcessingChains() { return processingChains; } @NonNull public SearchChains getSearchChains() { if (containerSearch == null) throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + "': Add <search/> to the cluster in services.xml"); return containerSearch.getChains(); } @Nullable public ContainerSearch getSearch() { return containerSearch; } public void setSearch(ContainerSearch containerSearch) { this.containerSearch = containerSearch; } public void setHttp(Http http) { this.http = http; addChild(http); } @Nullable public Http getHttp() { return http; } @Nullable public ContainerDocproc getDocproc() { return containerDocproc; } public void setDocproc(ContainerDocproc containerDocproc) { this.containerDocproc = containerDocproc; } @Nullable public ContainerDocumentApi getDocumentApi() { return containerDocumentApi; } public void setDocumentApi(ContainerDocumentApi containerDocumentApi) { this.containerDocumentApi = containerDocumentApi; } @NonNull public DocprocChains getDocprocChains() { if (containerDocproc == null) throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() + "': Add <document-processing/> to the cluster in services.xml"); return containerDocproc.getChains(); } @SuppressWarnings("unchecked") public Collection<Handler<?>> getHandlers() { return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class); } public void setSecretStore(SecretStore secretStore) { this.secretStore = secretStore; } 
public Optional<SecretStore> getSecretStore() { return Optional.ofNullable(secretStore); } public Map<ComponentId, Component<?, ?>> getComponentsMap() { return componentGroup.getComponentMap(); } /** Returns all components in this cluster (generic, handlers, chained) */ public Collection<Component<?, ?>> getAllComponents() { List<Component<?, ?>> allComponents = new ArrayList<>(); recursivelyFindAllComponents(allComponents, this); Collections.sort(allComponents); return Collections.unmodifiableCollection(allComponents); } private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) { for (AbstractConfigProducer<?> child: current.getChildren().values()) { if (child instanceof Component) allComponents.add((Component<?, ?>) child); if (!(child instanceof Container)) recursivelyFindAllComponents(allComponents, child); } } @Override public void getConfig(ComponentsConfig.Builder builder) { builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents())); builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack")); } @Override public void getConfig(JdiscBindingsConfig.Builder builder) { builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers())); } @Override public void getConfig(DocumentmanagerConfig.Builder builder) { if (containerDocproc != null && containerDocproc.isCompressDocuments()) builder.enablecompression(true); } @Override public void getConfig(ContainerDocumentConfig.Builder builder) { for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) { ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder(); dtb.type(e.getKey()); dtb.factorycomponent(e.getValue()); builder.doctype(dtb); } } @Override public void getConfig(HealthMonitorConfig.Builder builder) { Monitoring monitoring = getMonitoringService(); if (monitoring != null) { 
builder.snapshot_interval(monitoring.getIntervalSeconds()); } } @Override public void getConfig(ApplicationMetadataConfig.Builder builder) { if (applicationMetaData != null) { builder.name(applicationMetaData.getApplicationName()). user(applicationMetaData.getDeployedByUser()). path(applicationMetaData.getDeployPath()). timestamp(applicationMetaData.getDeployTimestamp()). checksum(applicationMetaData.getCheckSum()). generation(applicationMetaData.getGeneration()); } } /** * Adds a bundle present at a known location at the target container nodes. * * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar' */ public final void addPlatformBundle(Path bundlePath) { platformBundles.add(bundlePath); } @Override public void getConfig(BundlesConfig.Builder builder) { platformBundles.stream() .map(ContainerCluster::toFileReferenceString) .forEach(builder::bundle); } private static String toFileReferenceString(Path path) { return DISK_BUNDLE_PREFIX + path.toString(); } @Override public void getConfig(QrSearchersConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder builder) { QrStartConfig.Jvm.Builder jvmBuilder = builder.jvm; if (getMemoryPercentage().isPresent()) { jvmBuilder.heapSizeAsPercentageOfPhysicalMemory(getMemoryPercentage().get()); } else if (isHostedVespa()) { jvmBuilder.heapSizeAsPercentageOfPhysicalMemory(getHostClusterId().isPresent() ? 
17 : 60); } jvmBuilder.gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC)); if (environmentVars != null) { builder.qrs.env(environmentVars); } } @Override public void getConfig(DocprocConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SchemamappingConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(ContainerMbusConfig.Builder builder) { if (mbusParams != null) { if (mbusParams.maxConcurrentFactor != null) builder.maxConcurrentFactor(mbusParams.maxConcurrentFactor); if (mbusParams.documentExpansionFactor != null) builder.documentExpansionFactor(mbusParams.documentExpansionFactor); if (mbusParams.containerCoreMemory != null) builder.containerCoreMemory(mbusParams.containerCoreMemory); } if (containerDocproc != null) containerDocproc.getConfig(builder); } public void setMbusParams(MbusParams mbusParams) { this.mbusParams = mbusParams; } public void initialize(Map<String, AbstractSearchCluster> clusterMap) { if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap); } public void addDefaultSearchAccessLog() { addComponent(new AccessLogComponent(AccessLogComponent.AccessLogType.jsonAccessLog, getName(), isHostedVespa)); } @Override public void getConfig(IlscriptsConfig.Builder builder) { List<AbstractSearchCluster> searchClusters = new ArrayList<>(); 
searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo())); for (AbstractSearchCluster searchCluster : searchClusters) { searchCluster.getConfig(builder); } } @Override public void getConfig(ClusterInfoConfig.Builder builder) { builder.clusterId(name); builder.nodeCount(containers.size()); for (Service service : getDescendantServices()) { builder.services.add(new ClusterInfoConfig.Services.Builder() .index(Integer.parseInt(service.getServicePropertyString("index", "99999"))) .hostname(service.getHostName()) .ports(getPorts(service))); } } /** * Returns a config server config containing the right zone settings (and defaults for the rest). * This is useful to allow applications to find out in which zone they are runnung by having the Zone * object (which is constructed from this config) injected. */ @Override public void getConfig(ConfigserverConfig.Builder builder) { builder.system(zone.system().value()); builder.environment(zone.environment().value()); builder.region(zone.region().value()); } private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) { List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>(); PortsMeta portsMeta = service.getPortsMeta(); for (int i = 0; i < portsMeta.getNumPorts(); i++) { builders.add(new ClusterInfoConfig.Services.Ports.Builder() .number(service.getRelativePort(i)) .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i)) ); } return builders; } public boolean isHostedVespa() { return isHostedVespa; } @Override public void getConfig(RoutingProviderConfig.Builder builder) { builder.enabled(isHostedVespa); } public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; } /** The configured service aliases for the service in this cluster */ public List<String> serviceAliases() { return serviceAliases; } /** The configured endpoint aliases (fqdn) for the service in this cluster */ public List<String> endpointAliases() { return endpointAliases; } 
public void setHostClusterId(String clusterId) { hostClusterId = clusterId; } /** * Returns the id of the content cluster which hosts this container cluster, if any. * This is only set with hosted clusters where this container cluster is set up to run on the nodes * of a content cluster. */ public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); } public void setMemoryPercentage(Integer memoryPercentage) { this.memoryPercentage = memoryPercentage; } public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; } public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; } public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); } /** * Returns the percentage of host physical memory this application has specified for nodes in this cluster, * or empty if this is not specified by the application. */ public Optional<Integer> getMemoryPercentage() { return Optional.ofNullable(memoryPercentage); } public final void setMessageBusEnabled(boolean messageBusEnabled) { this.messageBusEnabled = messageBusEnabled; } boolean messageBusEnabled() { return messageBusEnabled; } public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; } boolean rpcServerEnabled() { return rpcServerEnabled; } boolean httpServerEnabled() { return httpServerEnabled; } public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; } @Override public String toString() { return "container cluster '" + getName() + "'"; } public static class MbusParams { final Double maxConcurrentFactor; final Double documentExpansionFactor; final Integer containerCoreMemory; public MbusParams(Double maxConcurrentFactor, Double documentExpansionFactor, Integer containerCoreMemory) { this.maxConcurrentFactor = maxConcurrentFactor; this.documentExpansionFactor = documentExpansionFactor; this.containerCoreMemory = 
containerCoreMemory; } } }
class ContainerCluster<CONTAINER extends Container> extends AbstractConfigProducer<AbstractConfigProducer<?>> implements ComponentsConfig.Producer, JdiscBindingsConfig.Producer, DocumentmanagerConfig.Producer, ContainerMbusConfig.Producer, ContainerDocumentConfig.Producer, HealthMonitorConfig.Producer, ApplicationMetadataConfig.Producer, BundlesConfig.Producer, IndexInfoConfig.Producer, IlscriptsConfig.Producer, SchemamappingConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, PageTemplatesConfig.Producer, SemanticRulesConfig.Producer, DocprocConfig.Producer, ClusterInfoConfig.Producer, RoutingProviderConfig.Producer, ConfigserverConfig.Producer { /** * URI prefix used for internal, usually programmatic, APIs. URIs using this * prefix should never considered available for direct use by customers, and * normal compatibility concerns only applies to libraries using the URIs in * question, not contents served from the URIs themselves. */ public static final String RESERVED_URI_PREFIX = "reserved-for-internal-use"; public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final String STATISTICS_HANDLER_CLASS = "com.yahoo.container.config.StatisticsRequestHandler"; public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider"; public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; public static final String ROOT_HANDLER_BINDING = "*: private final String name; 
protected List<CONTAINER> containers = new ArrayList<>(); private Http http; private ProcessingChains processingChains; private ContainerSearch containerSearch; private ContainerDocproc containerDocproc; private ContainerDocumentApi containerDocumentApi; private SecretStore secretStore; private MbusParams mbusParams; private boolean messageBusEnabled = true; private boolean rpcServerEnabled = true; private boolean httpServerEnabled = true; private final Set<Path> platformBundles = new LinkedHashSet<>(); private final List<String> serviceAliases = new ArrayList<>(); private final List<String> endpointAliases = new ArrayList<>(); private final ComponentGroup<Component<?, ?>> componentGroup; private final boolean isHostedVespa; private Map<String, String> concreteDocumentTypes = new LinkedHashMap<>(); private ApplicationMetaData applicationMetaData = null; /** The zone this is deployed in, or the default zone if not on hosted Vespa */ private Zone zone; private String hostClusterId = null; private String jvmGCOptions = null; private String environmentVars = null; private Integer memoryPercentage = null; public ContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) { super(parent, subId); this.name = name; this.isHostedVespa = stateIsHosted(deployState); this.zone = (deployState != null) ? 
deployState.zone() : Zone.defaultZone(); componentGroup = new ComponentGroup<>(this, "component"); addComponent(new StatisticsComponent()); addSimpleComponent(AccessLog.class); addSimpleComponent(ThreadPoolProvider.class); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class); addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor"); addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory"); addSimpleComponent("com.yahoo.container.handler.VipStatus"); addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName()); addJaxProviders(); addTestrunnerComponentsIfTester(deployState); } public void setZone(Zone zone) { this.zone = zone; } public Zone getZone() { return zone; } public void addDefaultHandlersWithVip() { addDefaultHandlersExceptStatus(); addVipHandler(); } public final void addDefaultHandlersExceptStatus() { addDefaultRootHandler(); addMetricStateHandler(); addApplicationStatusHandler(); } public void addMetricStateHandler() { Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>( new ComponentModel(STATE_HANDLER_CLASS, null, null, null)); stateHandler.addServerBindings("http: "http: addComponent(stateHandler); } public void addDefaultRootHandler() { if (hasHandlerWithBinding(ROOT_HANDLER_BINDING)) return; Handler<AbstractConfigProducer<?>> handler = new Handler<>( new 
ComponentModel(BundleInstantiationSpecification.getFromStrings( BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null)); handler.addServerBindings(ROOT_HANDLER_BINDING); addComponent(handler); } private boolean hasHandlerWithBinding(String binding) { Collection<Handler<?>> handlers = getHandlers(); for (Handler handler : handlers) { if (handler.getServerBindings().contains(binding)) return true; } return false; } public void addApplicationStatusHandler() { Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( APPLICATION_STATUS_HANDLER_CLASS, null), null)); statusHandler.addServerBindings("http: addComponent(statusHandler); } public void addVipHandler() { Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS); vipHandler.addServerBindings("http: addComponent(vipHandler); } @SuppressWarnings("deprecation") private void addJaxProviders() { addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class); } public final void addComponent(Component<?, ?> component) { componentGroup.addComponent(component); } public final void addSimpleComponent(String idSpec, String classSpec, String bundleSpec) { addComponent(new SimpleComponent(new 
ComponentModel(idSpec, classSpec, bundleSpec))); } /** * Removes a component by id * * @return the removed component, or null if it was not present */ public Component removeComponent(ComponentId componentId) { return componentGroup.removeComponent(componentId); } private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } protected void addSimpleComponent(String className) { addComponent(new SimpleComponent(className)); } public void prepare(DeployState deployState) { applicationMetaData = deployState.getApplicationPackage().getMetaData(); doPrepare(deployState); } protected abstract void doPrepare(DeployState deployState); public void addMbusServer(ComponentId chainId) { ComponentId serviceId = chainId.nestInNamespace(ComponentId.fromString("MbusServer")); addComponent( new Component<>(new ComponentModel(new BundleInstantiationSpecification( serviceId, ComponentSpecification.fromString(MbusServerProvider.class.getName()), null)))); } public String getName() { return name; } public List<CONTAINER> getContainers() { return Collections.unmodifiableList(containers); } public void addContainer(CONTAINER container) { container.setClusterName(name); container.setProp("clustername", name) .setProp("index", this.containers.size()); containers.add(container); } public void addContainers(Collection<CONTAINER> containers) { containers.forEach(this::addContainer); } public void setProcessingChains(ProcessingChains processingChains, String... 
serverBindings) { if (this.processingChains != null) throw new IllegalStateException("ProcessingChains should only be set once."); this.processingChains = processingChains; ProcessingHandler<?> processingHandler = new ProcessingHandler<>( processingChains, "com.yahoo.processing.handler.ProcessingHandler"); for (String binding: serverBindings) processingHandler.addServerBindings(binding); addComponent(processingHandler); } ProcessingChains getProcessingChains() { return processingChains; } @NonNull public SearchChains getSearchChains() { if (containerSearch == null) throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + "': Add <search/> to the cluster in services.xml"); return containerSearch.getChains(); } @Nullable public ContainerSearch getSearch() { return containerSearch; } public void setSearch(ContainerSearch containerSearch) { this.containerSearch = containerSearch; } public void setHttp(Http http) { this.http = http; addChild(http); } @Nullable public Http getHttp() { return http; } @Nullable public ContainerDocproc getDocproc() { return containerDocproc; } public void setDocproc(ContainerDocproc containerDocproc) { this.containerDocproc = containerDocproc; } @Nullable public ContainerDocumentApi getDocumentApi() { return containerDocumentApi; } public void setDocumentApi(ContainerDocumentApi containerDocumentApi) { this.containerDocumentApi = containerDocumentApi; } @NonNull public DocprocChains getDocprocChains() { if (containerDocproc == null) throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() + "': Add <document-processing/> to the cluster in services.xml"); return containerDocproc.getChains(); } @SuppressWarnings("unchecked") public Collection<Handler<?>> getHandlers() { return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class); } public void setSecretStore(SecretStore secretStore) { this.secretStore = secretStore; } 
public Optional<SecretStore> getSecretStore() { return Optional.ofNullable(secretStore); } public Map<ComponentId, Component<?, ?>> getComponentsMap() { return componentGroup.getComponentMap(); } /** Returns all components in this cluster (generic, handlers, chained) */ public Collection<Component<?, ?>> getAllComponents() { List<Component<?, ?>> allComponents = new ArrayList<>(); recursivelyFindAllComponents(allComponents, this); Collections.sort(allComponents); return Collections.unmodifiableCollection(allComponents); } private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) { for (AbstractConfigProducer<?> child: current.getChildren().values()) { if (child instanceof Component) allComponents.add((Component<?, ?>) child); if (!(child instanceof Container)) recursivelyFindAllComponents(allComponents, child); } } @Override public void getConfig(ComponentsConfig.Builder builder) { builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents())); builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack")); } @Override public void getConfig(JdiscBindingsConfig.Builder builder) { builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers())); } @Override public void getConfig(DocumentmanagerConfig.Builder builder) { if (containerDocproc != null && containerDocproc.isCompressDocuments()) builder.enablecompression(true); } @Override public void getConfig(ContainerDocumentConfig.Builder builder) { for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) { ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder(); dtb.type(e.getKey()); dtb.factorycomponent(e.getValue()); builder.doctype(dtb); } } @Override public void getConfig(HealthMonitorConfig.Builder builder) { Monitoring monitoring = getMonitoringService(); if (monitoring != null) { 
builder.snapshot_interval(monitoring.getIntervalSeconds()); } } @Override public void getConfig(ApplicationMetadataConfig.Builder builder) { if (applicationMetaData != null) { builder.name(applicationMetaData.getApplicationName()). user(applicationMetaData.getDeployedByUser()). path(applicationMetaData.getDeployPath()). timestamp(applicationMetaData.getDeployTimestamp()). checksum(applicationMetaData.getCheckSum()). generation(applicationMetaData.getGeneration()); } } /** * Adds a bundle present at a known location at the target container nodes. * * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar' */ public final void addPlatformBundle(Path bundlePath) { platformBundles.add(bundlePath); } @Override public void getConfig(BundlesConfig.Builder builder) { platformBundles.stream() .map(ContainerCluster::toFileReferenceString) .forEach(builder::bundle); } private static String toFileReferenceString(Path path) { return DISK_BUNDLE_PREFIX + path.toString(); } @Override public void getConfig(QrSearchersConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder builder) { QrStartConfig.Jvm.Builder jvmBuilder = builder.jvm; if (getMemoryPercentage().isPresent()) { jvmBuilder.heapSizeAsPercentageOfPhysicalMemory(getMemoryPercentage().get()); } else if (isHostedVespa()) { jvmBuilder.heapSizeAsPercentageOfPhysicalMemory(getHostClusterId().isPresent() ? 
17 : 60); } jvmBuilder.gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC)); if (environmentVars != null) { builder.qrs.env(environmentVars); } } @Override public void getConfig(DocprocConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SchemamappingConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(ContainerMbusConfig.Builder builder) { if (mbusParams != null) { if (mbusParams.maxConcurrentFactor != null) builder.maxConcurrentFactor(mbusParams.maxConcurrentFactor); if (mbusParams.documentExpansionFactor != null) builder.documentExpansionFactor(mbusParams.documentExpansionFactor); if (mbusParams.containerCoreMemory != null) builder.containerCoreMemory(mbusParams.containerCoreMemory); } if (containerDocproc != null) containerDocproc.getConfig(builder); } public void setMbusParams(MbusParams mbusParams) { this.mbusParams = mbusParams; } public void initialize(Map<String, AbstractSearchCluster> clusterMap) { if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap); } public void addDefaultSearchAccessLog() { addComponent(new AccessLogComponent(AccessLogComponent.AccessLogType.jsonAccessLog, getName(), isHostedVespa)); } @Override public void getConfig(IlscriptsConfig.Builder builder) { List<AbstractSearchCluster> searchClusters = new ArrayList<>(); 
searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo())); for (AbstractSearchCluster searchCluster : searchClusters) { searchCluster.getConfig(builder); } } @Override public void getConfig(ClusterInfoConfig.Builder builder) { builder.clusterId(name); builder.nodeCount(containers.size()); for (Service service : getDescendantServices()) { builder.services.add(new ClusterInfoConfig.Services.Builder() .index(Integer.parseInt(service.getServicePropertyString("index", "99999"))) .hostname(service.getHostName()) .ports(getPorts(service))); } } /** * Returns a config server config containing the right zone settings (and defaults for the rest). * This is useful to allow applications to find out in which zone they are runnung by having the Zone * object (which is constructed from this config) injected. */ @Override public void getConfig(ConfigserverConfig.Builder builder) { builder.system(zone.system().value()); builder.environment(zone.environment().value()); builder.region(zone.region().value()); } private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) { List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>(); PortsMeta portsMeta = service.getPortsMeta(); for (int i = 0; i < portsMeta.getNumPorts(); i++) { builders.add(new ClusterInfoConfig.Services.Ports.Builder() .number(service.getRelativePort(i)) .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i)) ); } return builders; } public boolean isHostedVespa() { return isHostedVespa; } @Override public void getConfig(RoutingProviderConfig.Builder builder) { builder.enabled(isHostedVespa); } public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; } /** The configured service aliases for the service in this cluster */ public List<String> serviceAliases() { return serviceAliases; } /** The configured endpoint aliases (fqdn) for the service in this cluster */ public List<String> endpointAliases() { return endpointAliases; } 
public void setHostClusterId(String clusterId) { hostClusterId = clusterId; } /** * Returns the id of the content cluster which hosts this container cluster, if any. * This is only set with hosted clusters where this container cluster is set up to run on the nodes * of a content cluster. */ public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); } public void setMemoryPercentage(Integer memoryPercentage) { this.memoryPercentage = memoryPercentage; } public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; } public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; } public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); } /** * Returns the percentage of host physical memory this application has specified for nodes in this cluster, * or empty if this is not specified by the application. */ public Optional<Integer> getMemoryPercentage() { return Optional.ofNullable(memoryPercentage); } public final void setMessageBusEnabled(boolean messageBusEnabled) { this.messageBusEnabled = messageBusEnabled; } boolean messageBusEnabled() { return messageBusEnabled; } public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; } boolean rpcServerEnabled() { return rpcServerEnabled; } boolean httpServerEnabled() { return httpServerEnabled; } public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; } @Override public String toString() { return "container cluster '" + getName() + "'"; } public static class MbusParams { final Double maxConcurrentFactor; final Double documentExpansionFactor; final Integer containerCoreMemory; public MbusParams(Double maxConcurrentFactor, Double documentExpansionFactor, Integer containerCoreMemory) { this.maxConcurrentFactor = maxConcurrentFactor; this.documentExpansionFactor = documentExpansionFactor; this.containerCoreMemory = 
containerCoreMemory; } } }
Is this standard practice Vespa? Seems backwards. If the client wants a timeout of X seconds they want the response no later than X seconds. To honor this the implementation/backends must use shorter timeouts to guarantee the response is sent no later than X.
public Search search(Query query) { try { URI target = endpoint.resolve(searchApiPath).resolve("?" + query.rawQuery()); HttpResponse<byte[]> response = send(HttpRequest.newBuilder(target) .timeout(query.timeout().orElse(Duration.ofMillis(500)) .plus(Duration.ofSeconds(1))), HttpResponse.BodyHandlers.ofByteArray()); if (response.statusCode() / 100 != 2) throw new RuntimeException("Non-OK status code " + response.statusCode() + " at " + target + ", with response \n" + new String(response.body())); return toSearch(response.body()); } catch (Exception e) { throw new RuntimeException(e); } }
.plus(Duration.ofSeconds(1))),
public Search search(Query query) { try { URI target = endpoint.resolve(searchApiPath).resolve("?" + query.rawQuery()); HttpResponse<byte[]> response = send(HttpRequest.newBuilder(target) .timeout(query.timeout().orElse(Duration.ofMillis(500)) .plus(Duration.ofSeconds(1))), HttpResponse.BodyHandlers.ofByteArray()); if (response.statusCode() / 100 != 2) throw new RuntimeException("Non-OK status code " + response.statusCode() + " at " + target + ", with response \n" + new String(response.body())); return toSearch(response.body()); } catch (Exception e) { throw new RuntimeException(e); } }
class HttpEndpoint implements TestEndpoint { static final String metricsPath = "/state/v1/metrics"; static final String documentApiPath = "/document/v1"; static final String searchApiPath = "/search"; private final URI endpoint; private final HttpClient client; private final EndpointAuthenticator authenticator; public HttpEndpoint(URI endpoint, EndpointAuthenticator authenticator) { this.endpoint = requireNonNull(endpoint); this.authenticator = requireNonNull(authenticator); this.client = HttpClient.newBuilder() .sslContext(authenticator.sslContext()) .connectTimeout(Duration.ofSeconds(5)) .version(HttpClient.Version.HTTP_1_1) .build(); } @Override public Digest digest(Feed feed) { return null; } @Override public URI uri() { return endpoint; } @Override public <T> HttpResponse<T> send(HttpRequest.Builder request, HttpResponse.BodyHandler<T> handler) { try { return client.send(authenticator.authenticated(request).build(), handler); } catch (IOException | InterruptedException e) { throw new RuntimeException(e); } } @Override static Search toSearch(byte[] body) { return new Search(new String(body, UTF_8)); } @Override public Visit visit(Selection selection) { return null; } @Override public Metrics metrics() { return null; } }
class HttpEndpoint implements TestEndpoint { static final String metricsPath = "/state/v1/metrics"; static final String documentApiPath = "/document/v1"; static final String searchApiPath = "/search"; private final URI endpoint; private final HttpClient client; private final EndpointAuthenticator authenticator; public HttpEndpoint(URI endpoint, EndpointAuthenticator authenticator) { this.endpoint = requireNonNull(endpoint); this.authenticator = requireNonNull(authenticator); this.client = HttpClient.newBuilder() .sslContext(authenticator.sslContext()) .connectTimeout(Duration.ofSeconds(5)) .version(HttpClient.Version.HTTP_1_1) .build(); } @Override public Digest digest(Feed feed) { return null; } @Override public URI uri() { return endpoint; } @Override public <T> HttpResponse<T> send(HttpRequest.Builder request, HttpResponse.BodyHandler<T> handler) { try { return client.send(authenticator.authenticated(request).build(), handler); } catch (IOException | InterruptedException e) { throw new RuntimeException(e); } } @Override static Search toSearch(byte[] body) { return new Search(new String(body, UTF_8)); } @Override public Visit visit(Selection selection) { return null; } @Override public Metrics metrics() { return null; } }
That would be good. Will experiment and see if it’s the case. I don’t know how the timeout relates to the events the server sees.
public Search search(Query query) { try { URI target = endpoint.resolve(searchApiPath).resolve("?" + query.rawQuery()); HttpResponse<byte[]> response = send(HttpRequest.newBuilder(target) .timeout(query.timeout().orElse(Duration.ofMillis(500)) .plus(Duration.ofSeconds(1))), HttpResponse.BodyHandlers.ofByteArray()); if (response.statusCode() / 100 != 2) throw new RuntimeException("Non-OK status code " + response.statusCode() + " at " + target + ", with response \n" + new String(response.body())); return toSearch(response.body()); } catch (Exception e) { throw new RuntimeException(e); } }
.plus(Duration.ofSeconds(1))),
public Search search(Query query) { try { URI target = endpoint.resolve(searchApiPath).resolve("?" + query.rawQuery()); HttpResponse<byte[]> response = send(HttpRequest.newBuilder(target) .timeout(query.timeout().orElse(Duration.ofMillis(500)) .plus(Duration.ofSeconds(1))), HttpResponse.BodyHandlers.ofByteArray()); if (response.statusCode() / 100 != 2) throw new RuntimeException("Non-OK status code " + response.statusCode() + " at " + target + ", with response \n" + new String(response.body())); return toSearch(response.body()); } catch (Exception e) { throw new RuntimeException(e); } }
class HttpEndpoint implements TestEndpoint { static final String metricsPath = "/state/v1/metrics"; static final String documentApiPath = "/document/v1"; static final String searchApiPath = "/search"; private final URI endpoint; private final HttpClient client; private final EndpointAuthenticator authenticator; public HttpEndpoint(URI endpoint, EndpointAuthenticator authenticator) { this.endpoint = requireNonNull(endpoint); this.authenticator = requireNonNull(authenticator); this.client = HttpClient.newBuilder() .sslContext(authenticator.sslContext()) .connectTimeout(Duration.ofSeconds(5)) .version(HttpClient.Version.HTTP_1_1) .build(); } @Override public Digest digest(Feed feed) { return null; } @Override public URI uri() { return endpoint; } @Override public <T> HttpResponse<T> send(HttpRequest.Builder request, HttpResponse.BodyHandler<T> handler) { try { return client.send(authenticator.authenticated(request).build(), handler); } catch (IOException | InterruptedException e) { throw new RuntimeException(e); } } @Override static Search toSearch(byte[] body) { return new Search(new String(body, UTF_8)); } @Override public Visit visit(Selection selection) { return null; } @Override public Metrics metrics() { return null; } }
class HttpEndpoint implements TestEndpoint { static final String metricsPath = "/state/v1/metrics"; static final String documentApiPath = "/document/v1"; static final String searchApiPath = "/search"; private final URI endpoint; private final HttpClient client; private final EndpointAuthenticator authenticator; public HttpEndpoint(URI endpoint, EndpointAuthenticator authenticator) { this.endpoint = requireNonNull(endpoint); this.authenticator = requireNonNull(authenticator); this.client = HttpClient.newBuilder() .sslContext(authenticator.sslContext()) .connectTimeout(Duration.ofSeconds(5)) .version(HttpClient.Version.HTTP_1_1) .build(); } @Override public Digest digest(Feed feed) { return null; } @Override public URI uri() { return endpoint; } @Override public <T> HttpResponse<T> send(HttpRequest.Builder request, HttpResponse.BodyHandler<T> handler) { try { return client.send(authenticator.authenticated(request).build(), handler); } catch (IOException | InterruptedException e) { throw new RuntimeException(e); } } @Override static Search toSearch(byte[] body) { return new Search(new String(body, UTF_8)); } @Override public Visit visit(Selection selection) { return null; } @Override public Metrics metrics() { return null; } }
AFAIK, there is no harm in calling upgrade(), as it is idempotent, and skipping it is an optimization. Consider just always calling it.
/**
 * Upgrades the given system applications in this zone, touching each application only
 * once all of its dependencies have converged on the target version.
 *
 * @return whether every application has converged on the target version in the zone
 */
private boolean upgradeAll(Version target, List<SystemApplication> applications, ZoneApi zone) {
    boolean converged = true;
    for (SystemApplication application : applications) {
        if (convergedOn(target, application.dependencies(), zone)) {
            boolean currentAppConverged = convergedOn(target, application, zone);
            // tenantHost is deployed even when already converged; upgrade() is idempotent.
            // NOTE(review): presumably the converged-check skip is just an optimization for
            // the other applications — confirm against upgrade()'s contract.
            if (application == SystemApplication.tenantHost || !currentAppConverged) {
                upgrade(target, application, zone);
            }
            converged &= currentAppConverged;
        }
    }
    return converged;
}
/**
 * Periodically deploys infrastructure (system) applications until they converge on a
 * target version, walking zone groups in the order given by the {@link UpgradePolicy}
 * and stopping at the first group that has not yet converged.
 */
abstract class InfrastructureUpgrader extends Maintainer { // BUG FIX: declares abstract methods, so must be abstract

    private static final Logger log = Logger.getLogger(InfrastructureUpgrader.class.getName());

    private final UpgradePolicy upgradePolicy;

    public InfrastructureUpgrader(Controller controller, Duration interval, JobControl jobControl,
                                  UpgradePolicy upgradePolicy, String name) {
        super(controller, interval, jobControl, name, EnumSet.allOf(SystemName.class));
        this.upgradePolicy = upgradePolicy;
    }

    @Override
    protected void maintain() {
        targetVersion().ifPresent(target -> upgradeAll(target, SystemApplication.all()));
    }

    /** Deploy a list of system applications until they converge on the given version */
    private void upgradeAll(Version target, List<SystemApplication> applications) {
        for (List<ZoneApi> zones : upgradePolicy.asList()) {
            boolean converged = true;
            for (ZoneApi zone : zones) {
                try {
                    converged &= upgradeAll(target, applications, zone);
                }
                catch (UnreachableNodeRepositoryException e) {
                    converged = false;
                    log.warning(String.format("%s: Failed to communicate with node repository in %s, continuing with next parallel zone: %s",
                                              this, zone, Exceptions.toMessageString(e)));
                }
                catch (Exception e) {
                    converged = false;
                    log.warning(String.format("%s: Failed to upgrade zone: %s, continuing with next parallel zone: %s",
                                              this, zone, Exceptions.toMessageString(e)));
                }
            }
            // Do not proceed to the next zone group until this one has fully converged.
            if (!converged) {
                break;
            }
        }
    }

    /** Upgrades all applications in one zone; returns whether they have all converged on target. */
    private boolean upgradeAll(Version target, List<SystemApplication> applications, ZoneApi zone) {
        boolean converged = true;
        for (SystemApplication application : applications) {
            if (convergedOn(target, application.dependencies(), zone)) {
                boolean currentAppConverged = convergedOn(target, application, zone);
                // tenantHost is always (re)deployed: upgrade() is idempotent, and the deployment
                // may carry changes beyond the version bump.
                if (application == SystemApplication.tenantHost || !currentAppConverged) {
                    upgrade(target, application, zone);
                }
                converged &= currentAppConverged;
            }
        }
        return converged;
    }

    /** Returns whether all applications have converged to the target version in zone */
    private boolean convergedOn(Version target, List<SystemApplication> applications, ZoneApi zone) {
        return applications.stream().allMatch(application -> convergedOn(target, application, zone));
    }

    /** Upgrade component to target version. Implementation should be idempotent */
    protected abstract void upgrade(Version target, SystemApplication application, ZoneApi zone);

    /** Returns whether application has converged to target version in zone */
    protected abstract boolean convergedOn(Version target, SystemApplication application, ZoneApi zone);

    /** Returns the target version for the component upgraded by this, if any */
    protected abstract Optional<Version> targetVersion();

    /** Returns whether the upgrader should require given node to upgrade */
    protected abstract boolean requireUpgradeOf(Node node, SystemApplication application, ZoneApi zone);

    /** Find the minimum value of a version field in a zone */
    protected final Optional<Version> minVersion(ZoneApi zone, SystemApplication application, Function<Node, Version> versionField) {
        try {
            return controller().configServer()
                               .nodeRepository()
                               .list(zone.getId(), application.id())
                               .stream()
                               .filter(node -> requireUpgradeOf(node, application, zone))
                               .map(versionField)
                               .min(Comparator.naturalOrder());
        }
        catch (Exception e) {
            throw new UnreachableNodeRepositoryException(String.format("Failed to get version for %s in %s: %s",
                                                                       application.id(), zone, Exceptions.toMessageString(e)));
        }
    }

    // Made static: it captures no enclosing state, and a non-static inner class would
    // retain a hidden reference to the outer instance.
    private static class UnreachableNodeRepositoryException extends RuntimeException {

        private UnreachableNodeRepositoryException(String reason) { super(reason); }

    }

}
This is an optimization for infrastructure applications, but this also handles the zone application which will fail if deployed in a zone without proxy nodes.
private boolean upgradeAll(Version target, List<SystemApplication> applications, ZoneApi zone) { boolean converged = true; for (SystemApplication application : applications) { if (convergedOn(target, application.dependencies(), zone)) { boolean currentAppConverged = convergedOn(target, application, zone); if (application == SystemApplication.tenantHost || !currentAppConverged) { upgrade(target, application, zone); } converged &= currentAppConverged; } } return converged; }
if (application == SystemApplication.tenantHost || !currentAppConverged) {
private boolean upgradeAll(Version target, List<SystemApplication> applications, ZoneApi zone) { boolean converged = true; for (SystemApplication application : applications) { if (convergedOn(target, application.dependencies(), zone)) { boolean currentAppConverged = convergedOn(target, application, zone); if (application == SystemApplication.tenantHost || !currentAppConverged) { upgrade(target, application, zone); } converged &= currentAppConverged; } } return converged; }
/**
 * Periodically deploys infrastructure (system) applications until they converge on a
 * target version, walking zone groups in upgrade-policy order and stopping at the
 * first group that has not yet converged.
 *
 * NOTE(review): this class declares abstract methods but lacks the 'abstract' modifier —
 * presumably lost in extraction; confirm against the original file.
 */
class InfrastructureUpgrader extends Maintainer {

    private static final Logger log = Logger.getLogger(InfrastructureUpgrader.class.getName());

    private final UpgradePolicy upgradePolicy;

    public InfrastructureUpgrader(Controller controller, Duration interval, JobControl jobControl,
                                  UpgradePolicy upgradePolicy, String name) {
        super(controller, interval, jobControl, name, EnumSet.allOf(SystemName.class));
        this.upgradePolicy = upgradePolicy;
    }

    @Override
    protected void maintain() {
        targetVersion().ifPresent(target -> upgradeAll(target, SystemApplication.all()));
    }

    /** Deploy a list of system applications until they converge on the given version */
    private void upgradeAll(Version target, List<SystemApplication> applications) {
        for (List<ZoneApi> zones : upgradePolicy.asList()) {
            boolean converged = true;
            for (ZoneApi zone : zones) {
                try {
                    // The zone-level overload is defined elsewhere in this file.
                    converged &= upgradeAll(target, applications, zone);
                }
                catch (UnreachableNodeRepositoryException e) {
                    converged = false;
                    log.warning(String.format("%s: Failed to communicate with node repository in %s, continuing with next parallel zone: %s",
                                              this, zone, Exceptions.toMessageString(e)));
                }
                catch (Exception e) {
                    converged = false;
                    log.warning(String.format("%s: Failed to upgrade zone: %s, continuing with next parallel zone: %s",
                                              this, zone, Exceptions.toMessageString(e)));
                }
            }
            // Later zone groups are not touched until this group has fully converged.
            if (!converged) {
                break;
            }
        }
    }

    /** Returns whether all applications have converged to the target version in zone */
    private boolean convergedOn(Version target, List<SystemApplication> applications, ZoneApi zone) {
        return applications.stream().allMatch(application -> convergedOn(target, application, zone));
    }

    /** Upgrade component to target version. Implementation should be idempotent */
    protected abstract void upgrade(Version target, SystemApplication application, ZoneApi zone);

    /** Returns whether application has converged to target version in zone */
    protected abstract boolean convergedOn(Version target, SystemApplication application, ZoneApi zone);

    /** Returns the target version for the component upgraded by this, if any */
    protected abstract Optional<Version> targetVersion();

    /** Returns whether the upgrader should require given node to upgrade */
    protected abstract boolean requireUpgradeOf(Node node, SystemApplication application, ZoneApi zone);

    /** Find the minimum value of a version field in a zone */
    protected final Optional<Version> minVersion(ZoneApi zone, SystemApplication application, Function<Node, Version> versionField) {
        try {
            return controller().configServer()
                               .nodeRepository()
                               .list(zone.getId(), application.id())
                               .stream()
                               .filter(node -> requireUpgradeOf(node, application, zone))
                               .map(versionField)
                               .min(Comparator.naturalOrder());
        }
        catch (Exception e) {
            throw new UnreachableNodeRepositoryException(String.format("Failed to get version for %s in %s: %s",
                                                                       application.id(), zone, Exceptions.toMessageString(e)));
        }
    }

    // NOTE(review): could be a static nested class — it captures no enclosing state.
    private class UnreachableNodeRepositoryException extends RuntimeException {

        private UnreachableNodeRepositoryException(String reason) { super(reason); }

    }

}
Right, the set of infrastructure applications could depend on the ZoneApi.
/**
 * Upgrades the given applications in one zone, in dependency order: an application is
 * only considered once everything it depends on has converged on the target version.
 *
 * @return whether all given applications have converged on the target in this zone
 */
private boolean upgradeAll(Version target, List<SystemApplication> applications, ZoneApi zone) {
    boolean converged = true;
    for (SystemApplication application : applications) {
        if (convergedOn(target, application.dependencies(), zone)) {
            boolean currentAppConverged = convergedOn(target, application, zone);
            // tenantHost is special-cased: it is always deployed, even when converged.
            if (application == SystemApplication.tenantHost || !currentAppConverged) {
                upgrade(target, application, zone);
            }
            // A single lagging application keeps the whole zone non-converged.
            converged &= currentAppConverged;
        }
    }
    return converged;
}
/**
 * Maintainer that rolls infrastructure applications to a target version across zones,
 * one upgrade-policy step at a time.
 *
 * NOTE(review): declares abstract methods without the 'abstract' class modifier —
 * presumably an extraction artifact; confirm.
 */
class InfrastructureUpgrader extends Maintainer {

    private static final Logger log = Logger.getLogger(InfrastructureUpgrader.class.getName());

    private final UpgradePolicy upgradePolicy;

    public InfrastructureUpgrader(Controller controller, Duration interval, JobControl jobControl,
                                  UpgradePolicy upgradePolicy, String name) {
        super(controller, interval, jobControl, name, EnumSet.allOf(SystemName.class));
        this.upgradePolicy = upgradePolicy;
    }

    @Override
    protected void maintain() {
        targetVersion().ifPresent(target -> upgradeAll(target, SystemApplication.all()));
    }

    /** Deploy a list of system applications until they converge on the given version */
    private void upgradeAll(Version target, List<SystemApplication> applications) {
        for (List<ZoneApi> zones : upgradePolicy.asList()) {
            boolean converged = true;
            for (ZoneApi zone : zones) {
                try {
                    converged &= upgradeAll(target, applications, zone); // zone-level overload defined elsewhere in file
                }
                catch (UnreachableNodeRepositoryException e) {
                    converged = false;
                    log.warning(String.format("%s: Failed to communicate with node repository in %s, continuing with next parallel zone: %s",
                                              this, zone, Exceptions.toMessageString(e)));
                }
                catch (Exception e) {
                    converged = false;
                    log.warning(String.format("%s: Failed to upgrade zone: %s, continuing with next parallel zone: %s",
                                              this, zone, Exceptions.toMessageString(e)));
                }
            }
            if (!converged) {
                break; // hold the rollout here until this zone group converges
            }
        }
    }

    /** Returns whether all applications have converged to the target version in zone */
    private boolean convergedOn(Version target, List<SystemApplication> applications, ZoneApi zone) {
        return applications.stream().allMatch(application -> convergedOn(target, application, zone));
    }

    /** Upgrade component to target version. Implementation should be idempotent */
    protected abstract void upgrade(Version target, SystemApplication application, ZoneApi zone);

    /** Returns whether application has converged to target version in zone */
    protected abstract boolean convergedOn(Version target, SystemApplication application, ZoneApi zone);

    /** Returns the target version for the component upgraded by this, if any */
    protected abstract Optional<Version> targetVersion();

    /** Returns whether the upgrader should require given node to upgrade */
    protected abstract boolean requireUpgradeOf(Node node, SystemApplication application, ZoneApi zone);

    /** Find the minimum value of a version field in a zone */
    protected final Optional<Version> minVersion(ZoneApi zone, SystemApplication application, Function<Node, Version> versionField) {
        try {
            return controller().configServer()
                               .nodeRepository()
                               .list(zone.getId(), application.id())
                               .stream()
                               .filter(node -> requireUpgradeOf(node, application, zone))
                               .map(versionField)
                               .min(Comparator.naturalOrder());
        }
        catch (Exception e) {
            throw new UnreachableNodeRepositoryException(String.format("Failed to get version for %s in %s: %s",
                                                                       application.id(), zone, Exceptions.toMessageString(e)));
        }
    }

    // NOTE(review): could be static — captures no enclosing state.
    private class UnreachableNodeRepositoryException extends RuntimeException {

        private UnreachableNodeRepositoryException(String reason) { super(reason); }

    }

}
`java.io.UncheckedIOException`?
public Optional<String> readUtf8FileIfExists() { try { return Optional.of(new String(Files.readAllBytes(path), StandardCharsets.UTF_8)); } catch (NoSuchFileException ignored) { return Optional.empty(); } catch (IOException e) { throw new RuntimeIOException(e); } }
throw new RuntimeIOException(e);
public Optional<String> readUtf8FileIfExists() { try { return Optional.of(new String(Files.readAllBytes(path), StandardCharsets.UTF_8)); } catch (NoSuchFileException ignored) { return Optional.empty(); } catch (IOException e) { throw new UncheckedIOException(e); } }
/**
 * Thin wrapper around {@link Path} that translates checked I/O exceptions to unchecked
 * ones and adds unix-flavored convenience operations (permissions, owner, group).
 */
class UnixPath {

    private final Path path;

    public UnixPath(Path path) { this.path = path; }

    public UnixPath(String path) { this(Paths.get(path)); }

    public Path toPath() { return path; }

    public boolean exists() { return Files.exists(path); }

    public String readUtf8File() { return new String(readBytes(), StandardCharsets.UTF_8); }

    public byte[] readBytes() { return uncheck(() -> Files.readAllBytes(path)); }

    public UnixPath writeUtf8File(String content, OpenOption... options) {
        return writeBytes(content.getBytes(StandardCharsets.UTF_8), options);
    }

    public UnixPath writeBytes(byte[] content, OpenOption... options) {
        uncheck(() -> Files.write(path, content, options));
        return this;
    }

    public String getPermissions() { return getAttributes().permissions(); }

    /**
     * @param permissions Example: "rwxr-x---" means rwx for owner, rx for group,
     *                    and no permissions for others.
     */
    public UnixPath setPermissions(String permissions) {
        Set<PosixFilePermission> permissionSet = getPosixFilePermissionsFromString(permissions);
        uncheck(() -> Files.setPosixFilePermissions(path, permissionSet));
        return this;
    }

    public String getOwner() { return getAttributes().owner(); }

    public UnixPath setOwner(String owner) {
        UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
        UserPrincipal principal = uncheck(
                () -> service.lookupPrincipalByName(owner),
                "While looking up user %s", owner);
        uncheck(() -> Files.setOwner(path, principal));
        return this;
    }

    public String getGroup() { return getAttributes().group(); }

    public UnixPath setGroup(String group) {
        UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService();
        GroupPrincipal principal = uncheck(
                () -> service.lookupPrincipalByGroupName(group),
                "while looking up group %s", group);
        uncheck(() -> Files.getFileAttributeView(path, PosixFileAttributeView.class).setGroup(principal));
        return this;
    }

    public Instant getLastModifiedTime() { return getAttributes().lastModifiedTime(); }

    public FileAttributes getAttributes() {
        PosixFileAttributes attributes = uncheck(() ->
                Files.getFileAttributeView(path, PosixFileAttributeView.class).readAttributes());
        return new FileAttributes(attributes);
    }

    public Optional<FileAttributes> getAttributesIfExists() { return ifExists(this::getAttributes); }

    public UnixPath createNewFile() {
        uncheck(() -> Files.createFile(path));
        return this;
    }

    public UnixPath createNewFile(String permissions) {
        FileAttribute<?> attribute = PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString(permissions));
        uncheck(() -> Files.createFile(path, attribute));
        return this;
    }

    /** Creates any missing parent directories of this path. */
    public UnixPath createParents() {
        Path parent = path.getParent();
        // BUG FIX: Path.getParent() returns null for a path without a parent (e.g. "file"
        // or "/"), which made the original Files.isDirectory(parent) call throw NPE.
        if (parent != null && !Files.isDirectory(parent)) {
            uncheck(() -> Files.createDirectories(parent));
        }
        return this;
    }

    public UnixPath createDirectory(String permissions) {
        Set<PosixFilePermission> set = getPosixFilePermissionsFromString(permissions);
        FileAttribute<Set<PosixFilePermission>> attribute = PosixFilePermissions.asFileAttribute(set);
        uncheck(() -> Files.createDirectory(path, attribute));
        return this;
    }

    public UnixPath createDirectory() {
        uncheck(() -> Files.createDirectory(path));
        return this;
    }

    public boolean isDirectory() { return uncheck(() -> Files.isDirectory(path)); }

    /**
     * Similar to rm -rf file:
     * - It's not an error if file doesn't exist
     * - If file is a directory, it and all content is removed
     * - For symlinks: Only the symlink is removed, not what the symlink points to
     */
    public boolean deleteRecursively() {
        if (isDirectory()) {
            for (UnixPath path : listContentsOfDirectory()) {
                path.deleteRecursively();
            }
        }
        return uncheck(() -> Files.deleteIfExists(path));
    }

    public UnixPath deleteIfExists() {
        uncheck(() -> Files.deleteIfExists(path));
        return this;
    }

    /** Lists directory entries; an absent directory yields an empty list. */
    public List<UnixPath> listContentsOfDirectory() {
        try (Stream<Path> stream = Files.list(path)) {
            return stream.map(UnixPath::new)
                         .collect(Collectors.toList());
        }
        catch (NoSuchFileException ignored) {
            return Collections.emptyList();
        }
        catch (IOException e) {
            throw new RuntimeException("Failed to list contents of directory " + path.toAbsolutePath(), e);
        }
    }

    /** This path must be on the same file system as the to-path. Returns UnixPath of 'to'. */
    public UnixPath atomicMove(Path to) {
        uncheck(() -> Files.move(path, to, StandardCopyOption.ATOMIC_MOVE));
        return new UnixPath(to);
    }

    /** Moves this path to {@code to}; returns false (without error) if this path does not exist. */
    public boolean moveIfExists(Path to) {
        try {
            Files.move(path, to);
            return true;
        }
        catch (NoSuchFileException ignored) {
            return false;
        }
        catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    /**
     * Creates a symbolic link from {@code link} to {@code this} (the target)
     * @param link the path for the symbolic link
     * @return the path to the symbolic link
     */
    public UnixPath createSymbolicLink(Path link) {
        uncheck(() -> Files.createSymbolicLink(link, path));
        return new UnixPath(link);
    }

    @Override
    public String toString() { return path.toString(); }

    private Set<PosixFilePermission> getPosixFilePermissionsFromString(String permissions) {
        try {
            return PosixFilePermissions.fromString(permissions);
        }
        catch (IllegalArgumentException e) {
            throw new IllegalArgumentException("Failed to set permissions '" + permissions + "' on path " + path, e);
        }
    }

}
class UnixPath { private final Path path; public UnixPath(Path path) { this.path = path; } public UnixPath(String path) { this(Paths.get(path)); } public Path toPath() { return path; } public boolean exists() { return Files.exists(path); } public String readUtf8File() { return new String(readBytes(), StandardCharsets.UTF_8); } public byte[] readBytes() { return uncheck(() -> Files.readAllBytes(path)); } public UnixPath writeUtf8File(String content, OpenOption... options) { return writeBytes(content.getBytes(StandardCharsets.UTF_8), options); } public UnixPath writeBytes(byte[] content, OpenOption... options) { uncheck(() -> Files.write(path, content, options)); return this; } public String getPermissions() { return getAttributes().permissions(); } /** * @param permissions Example: "rwxr-x---" means rwx for owner, rx for group, * and no permissions for others. */ public UnixPath setPermissions(String permissions) { Set<PosixFilePermission> permissionSet = getPosixFilePermissionsFromString(permissions); uncheck(() -> Files.setPosixFilePermissions(path, permissionSet)); return this; } public String getOwner() { return getAttributes().owner(); } public UnixPath setOwner(String owner) { UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService(); UserPrincipal principal = uncheck( () -> service.lookupPrincipalByName(owner), "While looking up user %s", owner); uncheck(() -> Files.setOwner(path, principal)); return this; } public String getGroup() { return getAttributes().group(); } public UnixPath setGroup(String group) { UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService(); GroupPrincipal principal = uncheck( () -> service.lookupPrincipalByGroupName(group), "while looking up group %s", group); uncheck(() -> Files.getFileAttributeView(path, PosixFileAttributeView.class).setGroup(principal)); return this; } public Instant getLastModifiedTime() { return getAttributes().lastModifiedTime(); } public 
FileAttributes getAttributes() { PosixFileAttributes attributes = uncheck(() -> Files.getFileAttributeView(path, PosixFileAttributeView.class).readAttributes()); return new FileAttributes(attributes); } public Optional<FileAttributes> getAttributesIfExists() { return ifExists(this::getAttributes); } public UnixPath createNewFile() { uncheck(() -> Files.createFile(path)); return this; } public UnixPath createNewFile(String permissions) { FileAttribute<?> attribute = PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString(permissions)); uncheck(() -> Files.createFile(path, attribute)); return this; } public UnixPath createParents() { Path parent = path.getParent(); if (!Files.isDirectory(parent)) { uncheck(() -> Files.createDirectories(parent)); } return this; } public UnixPath createDirectory(String permissions) { Set<PosixFilePermission> set = getPosixFilePermissionsFromString(permissions); FileAttribute<Set<PosixFilePermission>> attribute = PosixFilePermissions.asFileAttribute(set); uncheck(() -> Files.createDirectory(path, attribute)); return this; } public UnixPath createDirectory() { uncheck(() -> Files.createDirectory(path)); return this; } public boolean isDirectory() { return uncheck(() -> Files.isDirectory(path)); } /** * Similar to rm -rf file: * - It's not an error if file doesn't exist * - If file is a directory, it and all content is removed * - For symlinks: Only the symlink is removed, not what the symlink points to */ public boolean deleteRecursively() { if (isDirectory()) { for (UnixPath path : listContentsOfDirectory()) { path.deleteRecursively(); } } return uncheck(() -> Files.deleteIfExists(path)); } public UnixPath deleteIfExists() { uncheck(() -> Files.deleteIfExists(path)); return this; } public List<UnixPath> listContentsOfDirectory() { try (Stream<Path> stream = Files.list(path)){ return stream .map(UnixPath::new) .collect(Collectors.toList()); } catch (NoSuchFileException ignored) { return Collections.emptyList(); } catch 
(IOException e) { throw new RuntimeException("Failed to list contents of directory " + path.toAbsolutePath(), e); } } /** This path must be on the same file system as the to-path. Returns UnixPath of 'to'. */ public UnixPath atomicMove(Path to) { uncheck(() -> Files.move(path, to, StandardCopyOption.ATOMIC_MOVE)); return new UnixPath(to); } public boolean moveIfExists(Path to) { try { Files.move(path, to); return true; } catch (NoSuchFileException ignored) { return false; } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Creates a symbolic link from {@code link} to {@code this} (the target) * @param link the path for the symbolic link * @return the path to the symbolic link */ public UnixPath createSymbolicLink(Path link) { uncheck(() -> Files.createSymbolicLink(link, path)); return new UnixPath(link); } @Override public String toString() { return path.toString(); } private Set<PosixFilePermission> getPosixFilePermissionsFromString(String permissions) { try { return PosixFilePermissions.fromString(permissions); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Failed to set permissions '" + permissions + "' on path " + path, e); } } }
It's never `null`?
public void shutdownSourceConnections() { synchronized (activeSubscribersLock) { for (Subscriber subscriber : activeSubscribers.values()) { subscriber.cancel(); } activeSubscribers.clear(); } exec.shutdown(); if (requester != null) requester.close(); }
if (requester != null)
public void shutdownSourceConnections() { synchronized (activeSubscribersLock) { for (Subscriber subscriber : activeSubscribers.values()) { subscriber.cancel(); } activeSubscribers.clear(); } exec.shutdown(); requester.close(); }
class RpcConfigSourceClient implements ConfigSourceClient { private final static Logger log = Logger.getLogger(RpcConfigSourceClient.class.getName()); private final Supervisor supervisor = new Supervisor(new Transport()); private final RpcServer rpcServer; private final ConfigSourceSet configSourceSet; private final HashMap<ConfigCacheKey, Subscriber> activeSubscribers = new HashMap<>(); private final Object activeSubscribersLock = new Object(); private final MemoryCache memoryCache; private final DelayedResponses delayedResponses; private final TimingValues timingValues; private final ExecutorService exec; private final JRTConfigRequester requester; RpcConfigSourceClient(RpcServer rpcServer, ConfigSourceSet configSourceSet, MemoryCache memoryCache, TimingValues timingValues, DelayedResponses delayedResponses) { this.rpcServer = rpcServer; this.configSourceSet = configSourceSet; this.memoryCache = memoryCache; this.delayedResponses = delayedResponses; this.timingValues = timingValues; checkConfigSources(); exec = Executors.newCachedThreadPool(new DaemonThreadFactory("subscriber-")); requester = new JRTConfigRequester(new JRTConnectionPool(configSourceSet), timingValues); } /** * Checks if config sources are available */ private void checkConfigSources() { if (configSourceSet == null || configSourceSet.getSources() == null || configSourceSet.getSources().size() == 0) { log.log(LogLevel.WARNING, "No config sources defined, could not check connection"); } else { Request req = new Request("ping"); for (String configSource : configSourceSet.getSources()) { Spec spec = new Spec(configSource); Target target = supervisor.connect(spec); target.invokeSync(req, 30.0); if (target.isValid()) { log.log(LogLevel.DEBUG, () -> "Created connection to config source at " + spec.toString()); return; } else { log.log(LogLevel.INFO, "Could not connect to config source at " + spec.toString()); } target.close(); } String extra = ""; log.log(LogLevel.INFO, "Could not connect to any config 
source in set " + configSourceSet.toString() + ", please make sure config server(s) are running. " + extra); } } /** * Retrieves the requested config from the cache or the remote server. * <p> * If the requested config is different from the one in cache, the cached request is returned immediately. * If they are equal, this method returns null. * <p> * If the config was not in cache, this method starts a <em>Subscriber</em> in a separate thread * that gets the config and calls updateSubscribers(). * * @param input The config to retrieve - can be empty (no payload), or have a valid payload. * @return A Config with a payload. */ @Override public RawConfig getConfig(RawConfig input, JRTServerConfigRequest request) { DelayedResponse delayedResponse = new DelayedResponse(request); delayedResponses.add(delayedResponse); final ConfigCacheKey configCacheKey = new ConfigCacheKey(input.getKey(), input.getDefMd5()); RawConfig cachedConfig = memoryCache.get(configCacheKey); boolean needToGetConfig = true; RawConfig ret = null; if (cachedConfig != null) { log.log(LogLevel.DEBUG, () -> "Found config " + configCacheKey + " in cache, generation=" + cachedConfig.getGeneration() + ",configmd5=" + cachedConfig.getConfigMd5()); log.log(LogLevel.SPAM, () -> "input config=" + input + ",cached config=" + cachedConfig); if (ProxyServer.configOrGenerationHasChanged(cachedConfig, request)) { log.log(LogLevel.SPAM, () -> "Cached config is not equal to requested, will return it"); if (delayedResponses.remove(delayedResponse)) { ret = cachedConfig; } } if (!cachedConfig.isError() && cachedConfig.getGeneration() > 0) { needToGetConfig = false; } } if (needToGetConfig) { subscribeToConfig(input, configCacheKey); } return ret; } private void subscribeToConfig(RawConfig input, ConfigCacheKey configCacheKey) { synchronized (activeSubscribersLock) { if (activeSubscribers.containsKey(configCacheKey)) { log.log(LogLevel.DEBUG, () -> "Already a subscriber running for: " + configCacheKey); } else { 
log.log(LogLevel.DEBUG, () -> "Could not find good config in cache, creating subscriber for: " + configCacheKey); UpstreamConfigSubscriber subscriber = new UpstreamConfigSubscriber(input, this, configSourceSet, timingValues, requester, memoryCache); try { subscriber.subscribe(); activeSubscribers.put(configCacheKey, subscriber); exec.execute(subscriber); } catch (ConfigurationRuntimeException e) { log.log(LogLevel.INFO, "Subscribe for '" + configCacheKey + "' failed, closing subscriber"); subscriber.cancel(); } } } } @Override public void cancel() { shutdownSourceConnections(); } /** * Takes down connection(s) to config sources and running tasks */ @Override @Override public String getActiveSourceConnection() { if (requester != null) { return requester.getConnectionPool().getCurrent().getAddress(); } else { return ""; } } @Override public List<String> getSourceConnections() { ArrayList<String> ret = new ArrayList<>(); if (requester != null) { ret.addAll(configSourceSet.getSources()); } return ret; } /** * This method will be called when a response with changed config is received from upstream * (content or generation has changed) or the server timeout has elapsed. 
* * @param config new config */ public void updateSubscribers(RawConfig config) { log.log(LogLevel.DEBUG, () -> "Config updated for " + config.getKey() + "," + config.getGeneration()); DelayQueue<DelayedResponse> responseDelayQueue = delayedResponses.responses(); log.log(LogLevel.SPAM, () -> "Delayed response queue: " + responseDelayQueue); if (responseDelayQueue.size() == 0) { log.log(LogLevel.DEBUG, () -> "There exists no matching element on delayed response queue for " + config.getKey()); return; } else { log.log(LogLevel.DEBUG, () -> "Delayed response queue has " + responseDelayQueue.size() + " elements"); } boolean found = false; for (DelayedResponse response : responseDelayQueue.toArray(new DelayedResponse[0])) { JRTServerConfigRequest request = response.getRequest(); if (request.getConfigKey().equals(config.getKey()) && (config.getGeneration() >= request.getRequestGeneration() || config.getGeneration() == 0)) { if (delayedResponses.remove(response)) { found = true; log.log(LogLevel.DEBUG, () -> "Call returnOkResponse for " + config.getKey() + "," + config.getGeneration()); rpcServer.returnOkResponse(request, config); } else { log.log(LogLevel.INFO, "Could not remove " + config.getKey() + " from delayedResponses queue, already removed"); } } } if (!found) { log.log(LogLevel.DEBUG, () -> "Found no recipient for " + config.getKey() + " in delayed response queue"); } log.log(LogLevel.DEBUG, () -> "Finished updating config for " + config.getKey() + "," + config.getGeneration()); } }
class RpcConfigSourceClient implements ConfigSourceClient { private final static Logger log = Logger.getLogger(RpcConfigSourceClient.class.getName()); private final Supervisor supervisor = new Supervisor(new Transport()); private final RpcServer rpcServer; private final ConfigSourceSet configSourceSet; private final HashMap<ConfigCacheKey, Subscriber> activeSubscribers = new HashMap<>(); private final Object activeSubscribersLock = new Object(); private final MemoryCache memoryCache; private final DelayedResponses delayedResponses; private final TimingValues timingValues; private final ExecutorService exec; private final JRTConfigRequester requester; RpcConfigSourceClient(RpcServer rpcServer, ConfigSourceSet configSourceSet, MemoryCache memoryCache, TimingValues timingValues, DelayedResponses delayedResponses) { this.rpcServer = rpcServer; this.configSourceSet = configSourceSet; this.memoryCache = memoryCache; this.delayedResponses = delayedResponses; this.timingValues = timingValues; checkConfigSources(); exec = Executors.newCachedThreadPool(new DaemonThreadFactory("subscriber-")); requester = new JRTConfigRequester(new JRTConnectionPool(configSourceSet), timingValues); } /** * Checks if config sources are available */ private void checkConfigSources() { if (configSourceSet == null || configSourceSet.getSources() == null || configSourceSet.getSources().size() == 0) { log.log(LogLevel.WARNING, "No config sources defined, could not check connection"); } else { Request req = new Request("ping"); for (String configSource : configSourceSet.getSources()) { Spec spec = new Spec(configSource); Target target = supervisor.connect(spec); target.invokeSync(req, 30.0); if (target.isValid()) { log.log(LogLevel.DEBUG, () -> "Created connection to config source at " + spec.toString()); return; } else { log.log(LogLevel.INFO, "Could not connect to config source at " + spec.toString()); } target.close(); } String extra = ""; log.log(LogLevel.INFO, "Could not connect to any config 
source in set " + configSourceSet.toString() + ", please make sure config server(s) are running. " + extra); } } /** * Retrieves the requested config from the cache or the remote server. * <p> * If the requested config is different from the one in cache, the cached request is returned immediately. * If they are equal, this method returns null. * <p> * If the config was not in cache, this method starts a <em>Subscriber</em> in a separate thread * that gets the config and calls updateSubscribers(). * * @param input The config to retrieve - can be empty (no payload), or have a valid payload. * @return A Config with a payload. */ @Override public RawConfig getConfig(RawConfig input, JRTServerConfigRequest request) { DelayedResponse delayedResponse = new DelayedResponse(request); delayedResponses.add(delayedResponse); final ConfigCacheKey configCacheKey = new ConfigCacheKey(input.getKey(), input.getDefMd5()); RawConfig cachedConfig = memoryCache.get(configCacheKey); boolean needToGetConfig = true; RawConfig ret = null; if (cachedConfig != null) { log.log(LogLevel.DEBUG, () -> "Found config " + configCacheKey + " in cache, generation=" + cachedConfig.getGeneration() + ",configmd5=" + cachedConfig.getConfigMd5()); log.log(LogLevel.SPAM, () -> "input config=" + input + ",cached config=" + cachedConfig); if (ProxyServer.configOrGenerationHasChanged(cachedConfig, request)) { log.log(LogLevel.SPAM, () -> "Cached config is not equal to requested, will return it"); if (delayedResponses.remove(delayedResponse)) { ret = cachedConfig; } } if (!cachedConfig.isError() && cachedConfig.getGeneration() > 0) { needToGetConfig = false; } } if (needToGetConfig) { subscribeToConfig(input, configCacheKey); } return ret; } private void subscribeToConfig(RawConfig input, ConfigCacheKey configCacheKey) { synchronized (activeSubscribersLock) { if (activeSubscribers.containsKey(configCacheKey)) { log.log(LogLevel.DEBUG, () -> "Already a subscriber running for: " + configCacheKey); } else { 
log.log(LogLevel.DEBUG, () -> "Could not find good config in cache, creating subscriber for: " + configCacheKey); UpstreamConfigSubscriber subscriber = new UpstreamConfigSubscriber(input, this, configSourceSet, timingValues, requester, memoryCache); try { subscriber.subscribe(); activeSubscribers.put(configCacheKey, subscriber); exec.execute(subscriber); } catch (ConfigurationRuntimeException e) { log.log(LogLevel.INFO, "Subscribe for '" + configCacheKey + "' failed, closing subscriber"); subscriber.cancel(); } } } } @Override public void cancel() { shutdownSourceConnections(); } /** * Takes down connection(s) to config sources and running tasks */ @Override @Override public String getActiveSourceConnection() { return requester.getConnectionPool().getCurrent().getAddress(); } @Override public List<String> getSourceConnections() { return new ArrayList<>(configSourceSet.getSources()); } /** * This method will be called when a response with changed config is received from upstream * (content or generation has changed) or the server timeout has elapsed. 
* * @param config new config */ public void updateSubscribers(RawConfig config) { log.log(LogLevel.DEBUG, () -> "Config updated for " + config.getKey() + "," + config.getGeneration()); DelayQueue<DelayedResponse> responseDelayQueue = delayedResponses.responses(); log.log(LogLevel.SPAM, () -> "Delayed response queue: " + responseDelayQueue); if (responseDelayQueue.size() == 0) { log.log(LogLevel.DEBUG, () -> "There exists no matching element on delayed response queue for " + config.getKey()); return; } else { log.log(LogLevel.DEBUG, () -> "Delayed response queue has " + responseDelayQueue.size() + " elements"); } boolean found = false; for (DelayedResponse response : responseDelayQueue.toArray(new DelayedResponse[0])) { JRTServerConfigRequest request = response.getRequest(); if (request.getConfigKey().equals(config.getKey()) && (config.getGeneration() >= request.getRequestGeneration() || config.getGeneration() == 0)) { if (delayedResponses.remove(response)) { found = true; log.log(LogLevel.DEBUG, () -> "Call returnOkResponse for " + config.getKey() + "," + config.getGeneration()); rpcServer.returnOkResponse(request, config); } else { log.log(LogLevel.INFO, "Could not remove " + config.getKey() + " from delayedResponses queue, already removed"); } } } if (!found) { log.log(LogLevel.DEBUG, () -> "Found no recipient for " + config.getKey() + " in delayed response queue"); } log.log(LogLevel.DEBUG, () -> "Finished updating config for " + config.getKey() + "," + config.getGeneration()); } }
Yes!
public Optional<String> readUtf8FileIfExists() { try { return Optional.of(new String(Files.readAllBytes(path), StandardCharsets.UTF_8)); } catch (NoSuchFileException ignored) { return Optional.empty(); } catch (IOException e) { throw new RuntimeIOException(e); } }
throw new RuntimeIOException(e);
public Optional<String> readUtf8FileIfExists() { try { return Optional.of(new String(Files.readAllBytes(path), StandardCharsets.UTF_8)); } catch (NoSuchFileException ignored) { return Optional.empty(); } catch (IOException e) { throw new UncheckedIOException(e); } }
class UnixPath { private final Path path; public UnixPath(Path path) { this.path = path; } public UnixPath(String path) { this(Paths.get(path)); } public Path toPath() { return path; } public boolean exists() { return Files.exists(path); } public String readUtf8File() { return new String(readBytes(), StandardCharsets.UTF_8); } public byte[] readBytes() { return uncheck(() -> Files.readAllBytes(path)); } public UnixPath writeUtf8File(String content, OpenOption... options) { return writeBytes(content.getBytes(StandardCharsets.UTF_8), options); } public UnixPath writeBytes(byte[] content, OpenOption... options) { uncheck(() -> Files.write(path, content, options)); return this; } public String getPermissions() { return getAttributes().permissions(); } /** * @param permissions Example: "rwxr-x---" means rwx for owner, rx for group, * and no permissions for others. */ public UnixPath setPermissions(String permissions) { Set<PosixFilePermission> permissionSet = getPosixFilePermissionsFromString(permissions); uncheck(() -> Files.setPosixFilePermissions(path, permissionSet)); return this; } public String getOwner() { return getAttributes().owner(); } public UnixPath setOwner(String owner) { UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService(); UserPrincipal principal = uncheck( () -> service.lookupPrincipalByName(owner), "While looking up user %s", owner); uncheck(() -> Files.setOwner(path, principal)); return this; } public String getGroup() { return getAttributes().group(); } public UnixPath setGroup(String group) { UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService(); GroupPrincipal principal = uncheck( () -> service.lookupPrincipalByGroupName(group), "while looking up group %s", group); uncheck(() -> Files.getFileAttributeView(path, PosixFileAttributeView.class).setGroup(principal)); return this; } public Instant getLastModifiedTime() { return getAttributes().lastModifiedTime(); } public 
FileAttributes getAttributes() { PosixFileAttributes attributes = uncheck(() -> Files.getFileAttributeView(path, PosixFileAttributeView.class).readAttributes()); return new FileAttributes(attributes); } public Optional<FileAttributes> getAttributesIfExists() { return ifExists(this::getAttributes); } public UnixPath createNewFile() { uncheck(() -> Files.createFile(path)); return this; } public UnixPath createNewFile(String permissions) { FileAttribute<?> attribute = PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString(permissions)); uncheck(() -> Files.createFile(path, attribute)); return this; } public UnixPath createParents() { Path parent = path.getParent(); if (!Files.isDirectory(parent)) { uncheck(() -> Files.createDirectories(parent)); } return this; } public UnixPath createDirectory(String permissions) { Set<PosixFilePermission> set = getPosixFilePermissionsFromString(permissions); FileAttribute<Set<PosixFilePermission>> attribute = PosixFilePermissions.asFileAttribute(set); uncheck(() -> Files.createDirectory(path, attribute)); return this; } public UnixPath createDirectory() { uncheck(() -> Files.createDirectory(path)); return this; } public boolean isDirectory() { return uncheck(() -> Files.isDirectory(path)); } /** * Similar to rm -rf file: * - It's not an error if file doesn't exist * - If file is a directory, it and all content is removed * - For symlinks: Only the symlink is removed, not what the symlink points to */ public boolean deleteRecursively() { if (isDirectory()) { for (UnixPath path : listContentsOfDirectory()) { path.deleteRecursively(); } } return uncheck(() -> Files.deleteIfExists(path)); } public UnixPath deleteIfExists() { uncheck(() -> Files.deleteIfExists(path)); return this; } public List<UnixPath> listContentsOfDirectory() { try (Stream<Path> stream = Files.list(path)){ return stream .map(UnixPath::new) .collect(Collectors.toList()); } catch (NoSuchFileException ignored) { return Collections.emptyList(); } catch 
(IOException e) { throw new RuntimeException("Failed to list contents of directory " + path.toAbsolutePath(), e); } } /** This path must be on the same file system as the to-path. Returns UnixPath of 'to'. */ public UnixPath atomicMove(Path to) { uncheck(() -> Files.move(path, to, StandardCopyOption.ATOMIC_MOVE)); return new UnixPath(to); } public boolean moveIfExists(Path to) { try { Files.move(path, to); return true; } catch (NoSuchFileException ignored) { return false; } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Creates a symbolic link from {@code link} to {@code this} (the target) * @param link the path for the symbolic link * @return the path to the symbolic link */ public UnixPath createSymbolicLink(Path link) { uncheck(() -> Files.createSymbolicLink(link, path)); return new UnixPath(link); } @Override public String toString() { return path.toString(); } private Set<PosixFilePermission> getPosixFilePermissionsFromString(String permissions) { try { return PosixFilePermissions.fromString(permissions); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Failed to set permissions '" + permissions + "' on path " + path, e); } } }
class UnixPath { private final Path path; public UnixPath(Path path) { this.path = path; } public UnixPath(String path) { this(Paths.get(path)); } public Path toPath() { return path; } public boolean exists() { return Files.exists(path); } public String readUtf8File() { return new String(readBytes(), StandardCharsets.UTF_8); } public byte[] readBytes() { return uncheck(() -> Files.readAllBytes(path)); } public UnixPath writeUtf8File(String content, OpenOption... options) { return writeBytes(content.getBytes(StandardCharsets.UTF_8), options); } public UnixPath writeBytes(byte[] content, OpenOption... options) { uncheck(() -> Files.write(path, content, options)); return this; } public String getPermissions() { return getAttributes().permissions(); } /** * @param permissions Example: "rwxr-x---" means rwx for owner, rx for group, * and no permissions for others. */ public UnixPath setPermissions(String permissions) { Set<PosixFilePermission> permissionSet = getPosixFilePermissionsFromString(permissions); uncheck(() -> Files.setPosixFilePermissions(path, permissionSet)); return this; } public String getOwner() { return getAttributes().owner(); } public UnixPath setOwner(String owner) { UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService(); UserPrincipal principal = uncheck( () -> service.lookupPrincipalByName(owner), "While looking up user %s", owner); uncheck(() -> Files.setOwner(path, principal)); return this; } public String getGroup() { return getAttributes().group(); } public UnixPath setGroup(String group) { UserPrincipalLookupService service = path.getFileSystem().getUserPrincipalLookupService(); GroupPrincipal principal = uncheck( () -> service.lookupPrincipalByGroupName(group), "while looking up group %s", group); uncheck(() -> Files.getFileAttributeView(path, PosixFileAttributeView.class).setGroup(principal)); return this; } public Instant getLastModifiedTime() { return getAttributes().lastModifiedTime(); } public 
FileAttributes getAttributes() { PosixFileAttributes attributes = uncheck(() -> Files.getFileAttributeView(path, PosixFileAttributeView.class).readAttributes()); return new FileAttributes(attributes); } public Optional<FileAttributes> getAttributesIfExists() { return ifExists(this::getAttributes); } public UnixPath createNewFile() { uncheck(() -> Files.createFile(path)); return this; } public UnixPath createNewFile(String permissions) { FileAttribute<?> attribute = PosixFilePermissions.asFileAttribute(PosixFilePermissions.fromString(permissions)); uncheck(() -> Files.createFile(path, attribute)); return this; } public UnixPath createParents() { Path parent = path.getParent(); if (!Files.isDirectory(parent)) { uncheck(() -> Files.createDirectories(parent)); } return this; } public UnixPath createDirectory(String permissions) { Set<PosixFilePermission> set = getPosixFilePermissionsFromString(permissions); FileAttribute<Set<PosixFilePermission>> attribute = PosixFilePermissions.asFileAttribute(set); uncheck(() -> Files.createDirectory(path, attribute)); return this; } public UnixPath createDirectory() { uncheck(() -> Files.createDirectory(path)); return this; } public boolean isDirectory() { return uncheck(() -> Files.isDirectory(path)); } /** * Similar to rm -rf file: * - It's not an error if file doesn't exist * - If file is a directory, it and all content is removed * - For symlinks: Only the symlink is removed, not what the symlink points to */ public boolean deleteRecursively() { if (isDirectory()) { for (UnixPath path : listContentsOfDirectory()) { path.deleteRecursively(); } } return uncheck(() -> Files.deleteIfExists(path)); } public UnixPath deleteIfExists() { uncheck(() -> Files.deleteIfExists(path)); return this; } public List<UnixPath> listContentsOfDirectory() { try (Stream<Path> stream = Files.list(path)){ return stream .map(UnixPath::new) .collect(Collectors.toList()); } catch (NoSuchFileException ignored) { return Collections.emptyList(); } catch 
(IOException e) { throw new RuntimeException("Failed to list contents of directory " + path.toAbsolutePath(), e); } } /** This path must be on the same file system as the to-path. Returns UnixPath of 'to'. */ public UnixPath atomicMove(Path to) { uncheck(() -> Files.move(path, to, StandardCopyOption.ATOMIC_MOVE)); return new UnixPath(to); } public boolean moveIfExists(Path to) { try { Files.move(path, to); return true; } catch (NoSuchFileException ignored) { return false; } catch (IOException e) { throw new UncheckedIOException(e); } } /** * Creates a symbolic link from {@code link} to {@code this} (the target) * @param link the path for the symbolic link * @return the path to the symbolic link */ public UnixPath createSymbolicLink(Path link) { uncheck(() -> Files.createSymbolicLink(link, path)); return new UnixPath(link); } @Override public String toString() { return path.toString(); } private Set<PosixFilePermission> getPosixFilePermissionsFromString(String permissions) { try { return PosixFilePermissions.fromString(permissions); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Failed to set permissions '" + permissions + "' on path " + path, e); } } }
this would be sent as a double, not a tensor (auto-unboxing)
public void testFeatureData() { Cursor features = new Slime().setObject(); features.setDouble("scalar1", 1.5); features.setDouble("scalar2", 2.5); Tensor tensor1 = Tensor.from("tensor(x[3]):[1.5, 2, 2.5]"); features.setData("tensor1", TypedBinaryFormat.encode(tensor1)); Tensor tensor2 = Tensor.from(0.5); features.setData("tensor2", TypedBinaryFormat.encode(tensor2)); FeatureData featureData = new FeatureData(new SlimeAdapter(features)); assertEquals("scalar1,scalar2,tensor1,tensor2", featureData.featureNames().stream().sorted().collect(Collectors.joining(","))); assertEquals(1.5, featureData.getDouble("scalar1"), delta); assertEquals(2.5, featureData.getDouble("scalar2"), delta); assertEquals(Tensor.from(1.5), featureData.getTensor("scalar1")); assertEquals(Tensor.from(2.5), featureData.getTensor("scalar2")); assertEquals(tensor1, featureData.getTensor("tensor1")); assertEquals(tensor2, featureData.getTensor("tensor2")); String expectedJson = "{" + "\"scalar1\":1.5," + "\"scalar2\":2.5," + "\"tensor1\":{\"type\":\"tensor(x[3])\",\"cells\":[{\"address\":{\"x\":\"0\"},\"value\":1.5},{\"address\":{\"x\":\"1\"},\"value\":2.0},{\"address\":{\"x\":\"2\"},\"value\":2.5}]}," + "\"tensor2\":{\"type\":\"tensor()\",\"cells\":[{\"address\":{},\"value\":0.5}]}" + "}"; assertEquals(expectedJson, featureData.toJson()); }
Tensor tensor2 = Tensor.from(0.5);
public void testFeatureData() { Cursor features = new Slime().setObject(); features.setDouble("scalar1", 1.5); features.setDouble("scalar2", 2.5); Tensor tensor1 = Tensor.from("tensor(x[3]):[1.5, 2, 2.5]"); features.setData("tensor1", TypedBinaryFormat.encode(tensor1)); Tensor tensor2 = Tensor.from(0.5); features.setData("tensor2", TypedBinaryFormat.encode(tensor2)); FeatureData featureData = new FeatureData(new SlimeAdapter(features)); assertEquals("scalar1,scalar2,tensor1,tensor2", featureData.featureNames().stream().sorted().collect(Collectors.joining(","))); assertEquals(1.5, featureData.getDouble("scalar1"), delta); assertEquals(2.5, featureData.getDouble("scalar2"), delta); assertEquals(Tensor.from(1.5), featureData.getTensor("scalar1")); assertEquals(Tensor.from(2.5), featureData.getTensor("scalar2")); assertEquals(tensor1, featureData.getTensor("tensor1")); assertEquals(tensor2, featureData.getTensor("tensor2")); String expectedJson = "{" + "\"scalar1\":1.5," + "\"scalar2\":2.5," + "\"tensor1\":{\"type\":\"tensor(x[3])\",\"cells\":[{\"address\":{\"x\":\"0\"},\"value\":1.5},{\"address\":{\"x\":\"1\"},\"value\":2.0},{\"address\":{\"x\":\"2\"},\"value\":2.5}]}," + "\"tensor2\":{\"type\":\"tensor()\",\"cells\":[{\"address\":{},\"value\":0.5}]}" + "}"; assertEquals(expectedJson, featureData.toJson()); }
class FeatureDataTestCase { private static final double delta = 0.00000001; @Test }
class FeatureDataTestCase { private static final double delta = 0.00000001; @Test }
What about Vespa 6 nodes?
private void pushMetricsToContainer(NodeAgentContext context, List<DimensionMetrics> metrics) { StringBuilder params = new StringBuilder(); try { for (DimensionMetrics dimensionMetrics : metrics) { params.append(dimensionMetrics.toSecretAgentReport()); } String wrappedMetrics = "s:" + params.toString(); String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19095", "setExtraMetrics", wrappedMetrics}; dockerOperations.executeCommandInContainerAsRoot(context, 5L, command); } catch (JsonProcessingException | DockerExecTimeoutException e) { context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e); } }
String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19095", "setExtraMetrics", wrappedMetrics};
private void pushMetricsToContainer(NodeAgentContext context, List<DimensionMetrics> metrics) { StringBuilder params = new StringBuilder(); try { for (DimensionMetrics dimensionMetrics : metrics) { params.append(dimensionMetrics.toSecretAgentReport()); } String wrappedMetrics = "s:" + params.toString(); int port = context.node().getVespaVersion().map(version -> version.getMajor() == 6).orElse(false) ? 19091 : 19095; String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:" + port, "setExtraMetrics", wrappedMetrics}; dockerOperations.executeCommandInContainerAsRoot(context, 5L, command); } catch (JsonProcessingException | DockerExecTimeoutException e) { context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e); } }
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean hasResumedNode = false; private boolean hasStartedServices = true; private final NodeAgentContextSupplier contextSupplier; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final Optional<CredentialsMaintainer> credentialsMaintainer; private final Optional<AclMaintainer> aclMaintainer; private final Optional<HealthChecker> healthChecker; private final DoubleFlag containerCpuCap; private int numberOfUnhandledException = 0; private DockerImage imageBeingDownloaded = null; private long currentRebootGeneration = 0; private Optional<Long> currentRestartGeneration = Optional.empty(); private final Thread loopThread; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeSpec lastNode = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final NodeAgentContextSupplier contextSupplier, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final FlagSource flagSource, final Optional<CredentialsMaintainer> credentialsMaintainer, final Optional<AclMaintainer> aclMaintainer, final Optional<HealthChecker> healthChecker) { this.contextSupplier = contextSupplier; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.credentialsMaintainer = credentialsMaintainer; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource) .with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname()); this.loopThread = new Thread(() -> { while (!terminated.get()) { try { NodeAgentContext context = contextSupplier.nextContext(); converge(context); } catch (InterruptedException ignored) { } } }); this.loopThread.setName("tick-" + contextSupplier.currentContext().hostname()); } @Override public void start() { loopThread.start(); } @Override public void stopForRemoval() { if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } contextSupplier.interrupt(); do { try { loopThread.join(); } catch (InterruptedException ignored) { } } while (loopThread.isAlive()); contextSupplier.currentContext().log(logger, "Stopped"); } void startServicesIfNeeded(NodeAgentContext context) { if (!hasStartedServices) { context.log(logger, "Starting services"); dockerOperations.startServices(context); hasStartedServices = true; } } void 
resumeNodeIfNeeded(NodeAgentContext context) { if (!hasResumedNode) { context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command"); dockerOperations.resumeNode(context); hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); if (context.node().getWantedRestartGeneration().isPresent() && !Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) { currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration()); newNodeAttributes.withRestartGeneration(currentRestartGeneration); } if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) { currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration()); newNodeAttributes.withRebootGeneration(currentRebootGeneration); } Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) { DockerImage currentImage = context.node().getCurrentDockerImage().orElse(DockerImage.EMPTY); DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY); currentNodeAttributes.withDockerImage(currentImage); currentNodeAttributes.withVespaVersion(currentImage.tagAsVersion()); newNodeAttributes.withDockerImage(newImage); newNodeAttributes.withVespaVersion(newImage.tagAsVersion()); } publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) { if (!currentAttributes.equals(newAttributes)) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentAttributes, newAttributes); 
nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes); } } private void startContainer(NodeAgentContext context) { ContainerData containerData = createContainerData(context); dockerOperations.createContainer(context, containerData, getContainerResources(context)); dockerOperations.startContainer(context); lastCpuMetric = new CpuUsageReporter(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState( NodeAgentContext context, Optional<Container> existingContainer) { if (existingContainer.isPresent()) { Optional<String> reason = shouldRemoveContainer(context, existingContainer.get()); if (reason.isPresent()) { removeContainer(context, existingContainer.get(), reason.get(), false); return Optional.empty(); } shouldRestartServices(context.node()).ifPresent(restartReason -> { context.log(logger, "Will restart services: " + restartReason); restartServices(context, existingContainer.get()); currentRestartGeneration = context.node().getWantedRestartGeneration(); }); } return existingContainer; } private Optional<String> shouldRestartServices(NodeSpec node) { if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty(); if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get()); } return Optional.empty(); } private void restartServices(NodeAgentContext context, Container existingContainer) { if (existingContainer.state.isRunning() && context.node().getState() == NodeState.active) { context.log(logger, "Restarting services"); orchestratorSuspendNode(context); dockerOperations.restartVespa(context); } } private void stopServices() { NodeAgentContext context = 
contextSupplier.currentContext(); context.log(logger, "Stopping services"); if (containerState == ABSENT) return; try { hasStartedServices = hasResumedNode = false; dockerOperations.stopServices(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } } @Override public void stopForHostSuspension() { NodeAgentContext context = contextSupplier.currentContext(); getContainer(context).ifPresent(container -> removeContainer(context, container, "suspending host", true)); } public void suspend() { NodeAgentContext context = contextSupplier.currentContext(); context.log(logger, "Suspending services on node"); if (containerState == ABSENT) return; try { hasResumedNode = false; dockerOperations.suspendNode(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } catch (RuntimeException e) { context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e); } } private Optional<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) { final NodeState nodeState = context.node().getState(); if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (context.node().getWantedDockerImage().isPresent() && !context.node().getWantedDockerImage().get().equals(existingContainer.image)) { return Optional.of("The node is supposed to run a new Docker image: " + existingContainer.image.asString() + " -> " + context.node().getWantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } if (currentRebootGeneration < context.node().getWantedRebootGeneration()) { return Optional.of(String.format("Container reboot wanted. 
Current: %d, Wanted: %d", currentRebootGeneration, context.node().getWantedRebootGeneration())); } ContainerResources wantedContainerResources = getContainerResources(context); if (!wantedContainerResources.equalsMemory(existingContainer.resources)) { return Optional.of("Container should be running with different memory allocation, wanted: " + wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources.toStringMemory()); } if (containerState == STARTING) return Optional.of("Container failed to start"); return Optional.empty(); } private void removeContainer(NodeAgentContext context, Container existingContainer, String reason, boolean alreadySuspended) { context.log(logger, "Will remove container: " + reason); if (existingContainer.state.isRunning()) { if (!alreadySuspended) { orchestratorSuspendNode(context); } try { if (context.node().getState() != NodeState.dirty) { suspend(); } stopServices(); } catch (Exception e) { context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e); } } storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer)); dockerOperations.removeContainer(context, existingContainer); currentRebootGeneration = context.node().getWantedRebootGeneration(); containerState = ABSENT; context.log(logger, "Container successfully removed, new containerState is " + containerState); } private void updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) { ContainerResources wantedContainerResources = getContainerResources(context); if (wantedContainerResources.equalsCpu(existingContainer.resources)) return; context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s", wantedContainerResources.toStringCpu(), existingContainer.resources.toStringCpu()); orchestratorSuspendNode(context); dockerOperations.updateContainer(context, wantedContainerResources); } private ContainerResources getContainerResources(NodeAgentContext 
context) { double cpuCap = noCpuCap(context.zone()) ? 0 : context.node().getOwner() .map(NodeOwner::asApplicationId) .map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm())) .orElse(containerCpuCap) .value() * context.node().getMinCpuCores(); return ContainerResources.from(cpuCap, context.node().getMinCpuCores(), context.node().getMinMainMemoryAvailableGb()); } private boolean noCpuCap(ZoneApi zone) { return zone.getEnvironment() == Environment.dev || (zone.getSystemName().isCd() && zone.getEnvironment() != Environment.prod); } private void scheduleDownLoadIfNeeded(NodeSpec node, Optional<Container> container) { if (node.getWantedDockerImage().equals(container.map(c -> c.image))) return; if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) { imageBeingDownloaded = node.getWantedDockerImage().get(); } else if (imageBeingDownloaded != null) { imageBeingDownloaded = null; } } public void converge(NodeAgentContext context) { try { doConverge(context); } catch (ConvergenceException e) { context.log(logger, e.getMessage()); } catch (ContainerNotFoundException e) { containerState = ABSENT; context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState); } catch (DockerException e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Caught a DockerException", e); } catch (Throwable e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring", e); } } void doConverge(NodeAgentContext context) { NodeSpec node = context.node(); Optional<Container> container = getContainer(context); if (!node.equals(lastNode)) { logChangesToNodeSpec(context, lastNode, node); if (currentRebootGeneration < node.getCurrentRebootGeneration()) currentRebootGeneration = node.getCurrentRebootGeneration(); if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() || 
currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false)) currentRestartGeneration = node.getCurrentRestartGeneration(); if (container.map(c -> c.state.isRunning()).orElse(false)) { storageMaintainer.writeMetricsConfig(context); } lastNode = node; } switch (node.getState()) { case ready: case reserved: case parked: case failed: case inactive: removeContainerIfNeededUpdateContainerState(context, container); updateNodeRepoWithCurrentAttributes(context); break; case active: storageMaintainer.handleCoreDumpsForContainer(context, container); storageMaintainer.getDiskUsageFor(context) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb()) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context)); scheduleDownLoadIfNeeded(node, container); if (isDownloadingImage()) { context.log(logger, "Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(context, container); credentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context)); if (! 
container.isPresent()) { containerState = STARTING; startContainer(context); containerState = UNKNOWN; } else { updateContainerIfNeeded(context, container.get()); } aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); startServicesIfNeeded(context); resumeNodeIfNeeded(context); healthChecker.ifPresent(checker -> checker.verifyHealth(context)); updateNodeRepoWithCurrentAttributes(context); context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty); break; case dirty: removeContainerIfNeededUpdateContainerState(context, container); context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready"); credentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(context); nodeRepository.setNodeState(context.hostname().value(), NodeState.ready); break; default: throw new ConvergenceException("UNKNOWN STATE " + node.getState().name()); } } private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState); if (builder.length() > 0) { context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString()); } } private static <T> String fieldDescription(T value) { return value == null ? "[absent]" : value.toString(); } private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) { T oldValue = oldNode == null ? 
null : getter.apply(oldNode); T newValue = getter.apply(newNode); if (!Objects.equals(oldValue, newValue)) { if (builder.length() > 0) { builder.append(", "); } builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue)); } } @SuppressWarnings("unchecked") public void updateContainerNodeMetrics() { if (containerState != UNKNOWN) return; final NodeAgentContext context = contextSupplier.currentContext(); final NodeSpec node = context.node(); Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context); if (!containerStats.isPresent()) return; Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", context.hostname().value()) .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType())) .add("state", node.getState().toString()); node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent)); node.getAllowedToBeDown().ifPresent(allowed -> dimensionsBuilder.add("orchestratorState", allowed ? 
"ALLOWED_TO_BE_DOWN" : "NO_REMARKS")); Dimensions dimensions = dimensionsBuilder.build(); ContainerStats stats = containerStats.get(); final String APP = MetricReceiverWrapper.APPLICATION_NODE; final int totalNumCpuCores = stats.getCpuStats().getOnlineCpus(); final long cpuContainerKernelTime = stats.getCpuStats().getUsageInKernelMode(); final long cpuContainerTotalTime = stats.getCpuStats().getTotalUsage(); final long cpuSystemTotalTime = stats.getCpuStats().getSystemCpuUsage(); final long memoryTotalBytes = stats.getMemoryStats().getLimit(); final long memoryTotalBytesUsage = stats.getMemoryStats().getUsage(); final long memoryTotalBytesCache = stats.getMemoryStats().getCache(); final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB); final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores; double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio; double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio; long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes; double memoryTotalUsageRatio = (double) memoryTotalBytesUsage / memoryTotalBytes; Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes); List<DimensionMetrics> metrics = new ArrayList<>(); DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions) .withMetric("mem.limit", memoryTotalBytes) .withMetric("mem.used", memoryTotalBytesUsed) .withMetric("mem.util", 100 * memoryUsageRatio) .withMetric("mem_total.used", memoryTotalBytesUsage) .withMetric("mem_total.util", 100 * memoryTotalUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) 
.withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) .withMetric("cpu.vcpus", node.getMinCpuCores()) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio)); metrics.add(systemMetricsBuilder.build()); stats.getNetworks().forEach((interfaceName, interfaceStats) -> { Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims) .withMetric("net.in.bytes", interfaceStats.getRxBytes()) .withMetric("net.in.errors", interfaceStats.getRxErrors()) .withMetric("net.in.dropped", interfaceStats.getRxDropped()) .withMetric("net.out.bytes", interfaceStats.getTxBytes()) .withMetric("net.out.errors", interfaceStats.getTxErrors()) .withMetric("net.out.dropped", interfaceStats.getTxDropped()) .build(); metrics.add(networkMetrics); }); pushMetricsToContainer(context, metrics); } private Optional<Container> getContainer(NodeAgentContext context) { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = dockerOperations.getContainer(context); if (! container.isPresent()) containerState = ABSENT; return container; } @Override public boolean isDownloadingImage() { return imageBeingDownloaded != null; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 
0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } } private void orchestratorSuspendNode(NodeAgentContext context) { if (context.node().getState() != NodeState.active) return; context.log(logger, "Ask Orchestrator for permission to suspend node"); try { orchestrator.suspend(context.hostname().value()); } catch (OrchestratorException e) { try { aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); } catch (RuntimeException suppressed) { logger.log(LogLevel.WARNING, "Suppressing ACL update failure: " + suppressed); e.addSuppressed(suppressed); } throw e; } } protected ContainerData createContainerData(NodeAgentContext context) { return new ContainerData() { @Override public void addFile(Path pathInContainer, String data) { throw new UnsupportedOperationException("addFile not implemented"); } @Override public void createSymlink(Path symlink, Path target) { throw new UnsupportedOperationException("createSymlink not implemented"); } }; } }
/**
 * Drives a single node's Docker container towards its wanted state.
 *
 * <p>A dedicated "tick" thread repeatedly fetches the next {@link NodeAgentContext}
 * from the supplier and calls {@link #converge(NodeAgentContext)} until the agent is
 * terminated. Convergence compares the node's wanted state (from the node repository
 * spec) with the observed container state and creates/removes/updates the container,
 * starts/stops services, and reports attributes back to the node repository.
 *
 * <p>NOTE(review): this class is not thread-safe in general; mutable fields are
 * intended to be touched from the tick thread — confirm before calling from elsewhere.
 */
class NodeAgentImpl implements NodeAgent {

    // 10^9 (decimal GB), used to convert the spec's disk size in GB to bytes.
    private static final long BYTES_IN_GB = 1_000_000_000L;

    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

    // Set once by stopForRemoval(); the tick loop exits when this becomes true.
    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Tracks whether the optional "resume" program / services have been run since
    // the container was (re)started, so they are only invoked once per start.
    private boolean hasResumedNode = false;
    private boolean hasStartedServices = true;

    private final NodeAgentContextSupplier contextSupplier;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Optional<CredentialsMaintainer> credentialsMaintainer;
    private final Optional<AclMaintainer> aclMaintainer;
    private final Optional<HealthChecker> healthChecker;
    private final DoubleFlag containerCpuCap;

    // Counter surfaced via getAndResetNumberOfUnhandledExceptions(); bumped by converge().
    private int numberOfUnhandledException = 0;

    // Non-null while an async image pull is in progress (see scheduleDownLoadIfNeeded).
    private DockerImage imageBeingDownloaded = null;

    // Generations this agent has acted on; compared against the wanted generations
    // in the node spec to decide whether a reboot/restart is still pending.
    private long currentRebootGeneration = 0;
    private Optional<Long> currentRestartGeneration = Optional.empty();

    private final Thread loopThread;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }

    private ContainerState containerState = UNKNOWN;

    // Last node spec seen by doConverge(); used to detect and log spec changes.
    private NodeSpec lastNode = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    public NodeAgentImpl(
            final NodeAgentContextSupplier contextSupplier,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final FlagSource flagSource,
            final Optional<CredentialsMaintainer> credentialsMaintainer,
            final Optional<AclMaintainer> aclMaintainer,
            final Optional<HealthChecker> healthChecker) {
        this.contextSupplier = contextSupplier;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.credentialsMaintainer = credentialsMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;

        // CPU cap flag is resolved per hostname; may be further specialized per
        // application in getContainerResources().
        this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource)
                .with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname());

        // The tick loop: block for the next context, then converge. Interrupts are
        // used only to wake the loop (e.g. on termination), hence ignored here.
        this.loopThread = new Thread(() -> {
            while (!terminated.get()) {
                try {
                    NodeAgentContext context = contextSupplier.nextContext();
                    converge(context);
                } catch (InterruptedException ignored) { }
            }
        });
        this.loopThread.setName("tick-" + contextSupplier.currentContext().hostname());
    }

    /** Starts the tick thread. */
    @Override
    public void start() {
        loopThread.start();
    }

    /**
     * Terminates the agent: flips the terminated flag (exactly once), interrupts the
     * context supplier to wake the tick loop, and waits for the loop thread to die.
     */
    @Override
    public void stopForRemoval() {
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        contextSupplier.interrupt();
        do {
            try {
                loopThread.join();
            } catch (InterruptedException ignored) { }
        } while (loopThread.isAlive());
        contextSupplier.currentContext().log(logger, "Stopped");
    }

    /** Starts services inside the container unless already started since last (re)start. */
    void startServicesIfNeeded(NodeAgentContext context) {
        if (!hasStartedServices) {
            context.log(logger, "Starting services");
            dockerOperations.startServices(context);
            hasStartedServices = true;
        }
    }

    /** Runs the optional node "resume" command unless already run since last suspend/start. */
    void resumeNodeIfNeeded(NodeAgentContext context) {
        if (!hasResumedNode) {
            context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
            dockerOperations.resumeNode(context);
            hasResumedNode = true;
        }
    }

    /**
     * Computes which node attributes (restart/reboot generation, docker image, vespa
     * version) have changed relative to what the node repository currently has, and
     * publishes the new values if any differ.
     */
    private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();

        if (context.node().getWantedRestartGeneration().isPresent() &&
                !Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) {
            currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration());
            newNodeAttributes.withRestartGeneration(currentRestartGeneration);
        }

        if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) {
            currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration());
            newNodeAttributes.withRebootGeneration(currentRebootGeneration);
        }

        // Only report the wanted image as current while containerState is UNKNOWN
        // (i.e. the container may actually be running that image).
        Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) {
            DockerImage currentImage = context.node().getCurrentDockerImage().orElse(DockerImage.EMPTY);
            DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY);

            currentNodeAttributes.withDockerImage(currentImage);
            currentNodeAttributes.withVespaVersion(currentImage.tagAsVersion());
            newNodeAttributes.withDockerImage(newImage);
            newNodeAttributes.withVespaVersion(newImage.tagAsVersion());
        }

        publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes);
    }

    /** Pushes the new attribute set to the node repository if it differs from the current one. */
    private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) {
        if (!currentAttributes.equals(newAttributes)) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                    currentAttributes, newAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
        }
    }

    /** Creates and starts the container, resetting per-container-lifetime bookkeeping. */
    private void startContainer(NodeAgentContext context) {
        ContainerData containerData = createContainerData(context);
        dockerOperations.createContainer(context, containerData, getContainerResources(context));
        dockerOperations.startContainer(context);
        lastCpuMetric = new CpuUsageReporter();

        hasStartedServices = true; // Services are started at container creation
        hasResumedNode = false;
        context.log(logger, "Container successfully started, new containerState is " + containerState);
    }

    /**
     * Removes the container if shouldRemoveContainer() gives a reason, otherwise
     * restarts services inside it if a restart-generation bump requests it.
     * Returns the (possibly now empty) container.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(
            NodeAgentContext context, Optional<Container> existingContainer) {
        if (existingContainer.isPresent()) {
            Optional<String> reason = shouldRemoveContainer(context, existingContainer.get());
            if (reason.isPresent()) {
                removeContainer(context, existingContainer.get(), reason.get(), false);
                return Optional.empty();
            }

            shouldRestartServices(context.node()).ifPresent(restartReason -> {
                context.log(logger, "Will restart services: " + restartReason);
                restartServices(context, existingContainer.get());
                currentRestartGeneration = context.node().getWantedRestartGeneration();
            });
        }
        return existingContainer;
    }

    /** Returns a restart reason if the wanted restart generation is ahead of the one acted on. */
    private Optional<String> shouldRestartServices(NodeSpec node) {
        if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();

        // NOTE(review): currentRestartGeneration.get() assumes presence here — doConverge()
        // syncs it from the spec before this is reached; confirm if calling from elsewhere.
        if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get());
        }
        return Optional.empty();
    }

    /** Suspends the node via the orchestrator and restarts vespa inside the running container. */
    private void restartServices(NodeAgentContext context, Container existingContainer) {
        if (existingContainer.state.isRunning() && context.node().getState() == NodeState.active) {
            context.log(logger, "Restarting services");
            // Since we are restarting the services we need to suspend the node.
            orchestratorSuspendNode(context);
            dockerOperations.restartVespa(context);
        }
    }

    /** Stops services in the container; a missing container is recorded as ABSENT, not an error. */
    private void stopServices() {
        NodeAgentContext context = contextSupplier.currentContext();
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        try {
            hasStartedServices = hasResumedNode = false;
            dockerOperations.stopServices(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        }
    }

    /** Removes the container as part of host suspension (node already suspended by caller). */
    @Override
    public void stopForHostSuspension() {
        NodeAgentContext context = contextSupplier.currentContext();
        getContainer(context).ifPresent(container -> removeContainer(context, container, "suspending host", true));
    }

    /** Suspends services on the node; failures are logged and tolerated (best effort). */
    public void suspend() {
        NodeAgentContext context = contextSupplier.currentContext();
        context.log(logger, "Suspending services on node");
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;
            dockerOperations.suspendNode(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        } catch (RuntimeException e) {
            // It's bad to continue as-if nothing happened, but on the other hand if
            // too much is broken, suspend may never succeed — keep going best-effort.
            context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
        }
    }

    /**
     * Returns the reason the existing container must be removed, or empty if it can stay.
     * Checked in priority order: node state, wanted image, running state, reboot
     * generation, memory allocation, and a failed previous start attempt.
     */
    private Optional<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) {
        final NodeState nodeState = context.node().getState();
        if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (context.node().getWantedDockerImage().isPresent() &&
                !context.node().getWantedDockerImage().get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer.image.asString() + " -> " + context.node().getWantedDockerImage().get().asString());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }

        if (currentRebootGeneration < context.node().getWantedRebootGeneration()) {
            return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                    currentRebootGeneration, context.node().getWantedRebootGeneration()));
        }

        // Memory changes require a container restart; CPU-only changes are handled
        // live by updateContainerIfNeeded().
        ContainerResources wantedContainerResources = getContainerResources(context);
        if (!wantedContainerResources.equalsMemory(existingContainer.resources)) {
            return Optional.of("Container should be running with different memory allocation, wanted: " +
                    wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources.toStringMemory());
        }

        if (containerState == STARTING) return Optional.of("Container failed to start");
        return Optional.empty();
    }

    /**
     * Stops and removes the container, bumping the acted-on reboot generation and
     * setting containerState to ABSENT. Suspends via orchestrator first unless the
     * caller already did.
     */
    private void removeContainer(NodeAgentContext context, Container existingContainer, String reason, boolean alreadySuspended) {
        context.log(logger, "Will remove container: " + reason);

        if (existingContainer.state.isRunning()) {
            if (!alreadySuspended) {
                orchestratorSuspendNode(context);
            }

            try {
                if (context.node().getState() != NodeState.dirty) {
                    suspend();
                }
                stopServices();
            } catch (Exception e) {
                context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
            }
        }

        storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer));
        dockerOperations.removeContainer(context, existingContainer);
        currentRebootGeneration = context.node().getWantedRebootGeneration();
        containerState = ABSENT;
        context.log(logger, "Container successfully removed, new containerState is " + containerState);
    }

    /** Applies a changed CPU allocation to the live container (no restart needed). */
    private void updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) {
        ContainerResources wantedContainerResources = getContainerResources(context);
        if (wantedContainerResources.equalsCpu(existingContainer.resources)) return;
        context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s",
                wantedContainerResources.toStringCpu(), existingContainer.resources.toStringCpu());

        orchestratorSuspendNode(context);

        dockerOperations.updateContainer(context, wantedContainerResources);
    }

    /**
     * Computes the container's resource allocation from the node spec. The CPU cap is
     * the flag value (possibly application-specific) times min CPU cores, or 0 (uncapped)
     * in zones where noCpuCap() holds.
     */
    private ContainerResources getContainerResources(NodeAgentContext context) {
        double cpuCap = noCpuCap(context.zone()) ?
                0 :
                context.node().getOwner()
                        .map(NodeOwner::asApplicationId)
                        .map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm()))
                        .orElse(containerCpuCap)
                        .value() * context.node().getMinCpuCores();

        return ContainerResources.from(cpuCap, context.node().getMinCpuCores(), context.node().getMinMainMemoryAvailableGb());
    }

    /** CPU capping is disabled in dev, and in CD systems outside prod. */
    private boolean noCpuCap(ZoneApi zone) {
        return zone.getEnvironment() == Environment.dev ||
                (zone.getSystemName().isCd() && zone.getEnvironment() != Environment.prod);
    }

    /**
     * Kicks off an async pull of the wanted image if the container is not already
     * running it; clears imageBeingDownloaded once the pull completes.
     */
    private void scheduleDownLoadIfNeeded(NodeSpec node, Optional<Container> container) {
        if (node.getWantedDockerImage().equals(container.map(c -> c.image))) return;

        if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
            imageBeingDownloaded = node.getWantedDockerImage().get();
        } else if (imageBeingDownloaded != null) { // Image was downloading, but now it's ready
            imageBeingDownloaded = null;
        }
    }

    /**
     * One convergence tick: delegates to doConverge() and maps exceptions to either
     * a log line (expected convergence failures, vanished container) or the
     * unhandled-exception counter (everything else). Never throws.
     */
    public void converge(NodeAgentContext context) {
        try {
            doConverge(context);
        } catch (ConvergenceException e) {
            context.log(logger, e.getMessage());
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
            context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
        } catch (DockerException e) {
            numberOfUnhandledException++;
            context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
        } catch (Throwable e) {
            numberOfUnhandledException++;
            context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring", e);
        }
    }

    /**
     * The per-tick state machine: syncs local generation counters with a changed
     * node spec, then acts according to the node's state (remove container for
     * inactive-like states; full create/start/update/resume pipeline for active;
     * wipe storage and mark ready for dirty).
     */
    void doConverge(NodeAgentContext context) {
        NodeSpec node = context.node();
        Optional<Container> container = getContainer(context);

        if (!node.equals(lastNode)) {
            logChangesToNodeSpec(context, lastNode, node);

            // Current reboot generation uninitialized or incremented from outside to cancel reboot
            if (currentRebootGeneration < node.getCurrentRebootGeneration())
                currentRebootGeneration = node.getCurrentRebootGeneration();

            // Either we have changed allocation status (restart gen. is present on
            // allocated nodes) or restart generation has been incremented from outside.
            if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() ||
                    currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false))
                currentRestartGeneration = node.getCurrentRestartGeneration();

            // Every time the node spec changes, we should clear the metrics for this container as the dimensions
            // will change and we do not want to keep polluting the metrics with old data.
            if (container.map(c -> c.state.isRunning()).orElse(false)) {
                storageMaintainer.writeMetricsConfig(context);
            }

            lastNode = node;
        }

        switch (node.getState()) {
            case ready:
            case reserved:
            case parked:
            case failed:
            case inactive:
                removeContainerIfNeededUpdateContainerState(context, container);
                updateNodeRepoWithCurrentAttributes(context);
                break;
            case active:
                storageMaintainer.handleCoreDumpsForContainer(context, container);

                // Free disk space proactively when utilization reaches 80%.
                storageMaintainer.getDiskUsageFor(context)
                        .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                        .filter(diskUtil -> diskUtil >= 0.8)
                        .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));

                scheduleDownLoadIfNeeded(node, container);
                if (isDownloadingImage()) {
                    context.log(logger, "Waiting for image to download " + imageBeingDownloaded.asString());
                    return;
                }
                container = removeContainerIfNeededUpdateContainerState(context, container);
                credentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
                if (! container.isPresent()) {
                    // STARTING marks the attempt so a crash between create and start
                    // is detected next tick (see shouldRemoveContainer).
                    containerState = STARTING;
                    startContainer(context);
                    containerState = UNKNOWN;
                } else {
                    updateContainerIfNeeded(context, container.get());
                }

                aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));

                startServicesIfNeeded(context);
                resumeNodeIfNeeded(context);
                healthChecker.ifPresent(checker -> checker.verifyHealth(context));

                // Because it's more important to stop a bad release from rolling out in prod,
                // we put the resume call last. So if we fail after updating the node repo attributes
                // but before resume, the app may go through the tenant pipeline but will halt in prod.
                updateNodeRepoWithCurrentAttributes(context);
                context.log(logger, "Call resume against Orchestrator");
                orchestrator.resume(context.hostname().value());
                break;
            case provisioned:
                nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty);
                break;
            case dirty:
                removeContainerIfNeededUpdateContainerState(context, container);
                context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
                credentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
                storageMaintainer.archiveNodeStorage(context);
                updateNodeRepoWithCurrentAttributes(context);
                nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
                break;
            default:
                throw new ConvergenceException("UNKNOWN STATE " + node.getState().name());
        }
    }

    /** Logs a one-line summary of node spec changes (currently only 'state' is compared). */
    private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
        StringBuilder builder = new StringBuilder();
        appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState);
        if (builder.length() > 0) {
            context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString());
        }
    }

    /** Human-readable rendering of a possibly-null field value. */
    private static <T> String fieldDescription(T value) {
        return value == null ? "[absent]" : value.toString();
    }

    /** Appends "name old -> new" to the builder when the extracted field values differ. */
    private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
        T oldValue = oldNode == null ? null : getter.apply(oldNode);
        T newValue = getter.apply(newNode);
        if (!Objects.equals(oldValue, newValue)) {
            if (builder.length() > 0) {
                builder.append(", ");
            }
            builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
        }
    }

    /**
     * Samples docker container stats and pushes system and per-network-interface
     * metrics (cpu/mem/disk/net). No-op unless containerState is UNKNOWN (i.e. the
     * container may be running) and stats are available.
     */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        if (containerState != UNKNOWN) return;
        final NodeAgentContext context = contextSupplier.currentContext();
        final NodeSpec node = context.node();

        Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", context.hostname().value())
                .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
                .add("state", node.getState().toString());
        node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
        node.getAllowedToBeDown().ifPresent(allowed ->
                dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
        Dimensions dimensions = dimensionsBuilder.build();

        ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        final int totalNumCpuCores = stats.getCpuStats().getOnlineCpus();
        final long cpuContainerKernelTime = stats.getCpuStats().getUsageInKernelMode();
        final long cpuContainerTotalTime = stats.getCpuStats().getTotalUsage();
        final long cpuSystemTotalTime = stats.getCpuStats().getSystemCpuUsage();
        final long memoryTotalBytes = stats.getMemoryStats().getLimit();
        final long memoryTotalBytesUsage = stats.getMemoryStats().getUsage();
        final long memoryTotalBytesCache = stats.getMemoryStats().getCache();
        final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);

        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

        // Ratio of CPU time used relative to the node's allocated share of the host.
        final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        double memoryTotalUsageRatio = (double) memoryTotalBytesUsage / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("mem_total.used", memoryTotalBytesUsage)
                .withMetric("mem_total.util", 100 * memoryTotalUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("cpu.vcpus", node.getMinCpuCores())
                .withMetric("disk.limit", diskTotalBytes);

        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", interfaceStats.getRxBytes())
                    .withMetric("net.in.errors", interfaceStats.getRxErrors())
                    .withMetric("net.in.dropped", interfaceStats.getRxDropped())
                    .withMetric("net.out.bytes", interfaceStats.getTxBytes())
                    .withMetric("net.out.errors", interfaceStats.getTxErrors())
                    .withMetric("net.out.dropped", interfaceStats.getTxDropped())
                    .build();
            metrics.add(networkMetrics);
        });

        // NOTE(review): pushMetricsToContainer is defined elsewhere in this class
        // (outside this chunk) — confirm against the full file.
        pushMetricsToContainer(context, metrics);
    }

    /** Looks up the container from docker; caches definite absence in containerState. */
    private Optional<Container> getContainer(NodeAgentContext context) {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(context);
        if (! container.isPresent()) containerState = ABSENT;
        return container;
    }

    /** True while an async image pull started by this agent is in progress. */
    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    /** Returns the unhandled-exception count since the last call, then resets it. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /**
     * Tracks cumulative CPU-time counters between samples and exposes the deltas
     * as usage ratios relative to total system CPU time.
     */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            // First sample (previous total is 0) yields a zero system delta so the
            // ratios below report NaN instead of a bogus value.
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    /**
     * Asks the orchestrator for permission to suspend the node. If suspension is
     * rejected, tries to converge the ACLs (a common cause of rejection) before
     * rethrowing with the ACL failure attached as a suppressed exception.
     */
    private void orchestratorSuspendNode(NodeAgentContext context) {
        if (context.node().getState() != NodeState.active) return;

        context.log(logger, "Ask Orchestrator for permission to suspend node");
        try {
            orchestrator.suspend(context.hostname().value());
        } catch (OrchestratorException e) {
            // Ensure the ACLs are up to date: The reason orchestrator denies suspension
            // may be a health check that failed because the ACL rules were outdated.
            try {
                aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            } catch (RuntimeException suppressed) {
                logger.log(LogLevel.WARNING, "Suppressing ACL update failure: " + suppressed);
                e.addSuppressed(suppressed);
            }
            throw e;
        }
    }

    /** Default ContainerData stub; subclasses override to actually write container files. */
    protected ContainerData createContainerData(NodeAgentContext context) {
        return new ContainerData() {
            @Override
            public void addFile(Path pathInContainer, String data) {
                throw new UnsupportedOperationException("addFile not implemented");
            }

            @Override
            public void createSymlink(Path symlink, Path target) {
                throw new UnsupportedOperationException("createSymlink not implemented");
            }
        };
    }
}
Yes, thanks — fixed.
/**
 * Takes down connection(s) to config sources and running tasks: cancels all
 * active subscribers, shuts down the subscriber executor, and closes the
 * JRT config requester.
 */
public void shutdownSourceConnections() {
    synchronized (activeSubscribersLock) {
        for (Subscriber subscriber : activeSubscribers.values()) {
            subscriber.cancel();
        }
        activeSubscribers.clear();
    }
    exec.shutdown();
    // 'requester' is a final field unconditionally assigned in the constructor,
    // so it can never be null here — the previous null check was dead code.
    requester.close();
}
if (requester != null)
/**
 * Takes down connection(s) to config sources and running tasks: every active
 * subscriber is cancelled and forgotten, the subscriber executor is shut down,
 * and the JRT config requester is closed.
 */
public void shutdownSourceConnections() {
    synchronized (activeSubscribersLock) {
        activeSubscribers.values().forEach(Subscriber::cancel);
        activeSubscribers.clear();
    }
    exec.shutdown();
    requester.close();
}
class RpcConfigSourceClient implements ConfigSourceClient { private final static Logger log = Logger.getLogger(RpcConfigSourceClient.class.getName()); private final Supervisor supervisor = new Supervisor(new Transport()); private final RpcServer rpcServer; private final ConfigSourceSet configSourceSet; private final HashMap<ConfigCacheKey, Subscriber> activeSubscribers = new HashMap<>(); private final Object activeSubscribersLock = new Object(); private final MemoryCache memoryCache; private final DelayedResponses delayedResponses; private final TimingValues timingValues; private final ExecutorService exec; private final JRTConfigRequester requester; RpcConfigSourceClient(RpcServer rpcServer, ConfigSourceSet configSourceSet, MemoryCache memoryCache, TimingValues timingValues, DelayedResponses delayedResponses) { this.rpcServer = rpcServer; this.configSourceSet = configSourceSet; this.memoryCache = memoryCache; this.delayedResponses = delayedResponses; this.timingValues = timingValues; checkConfigSources(); exec = Executors.newCachedThreadPool(new DaemonThreadFactory("subscriber-")); requester = new JRTConfigRequester(new JRTConnectionPool(configSourceSet), timingValues); } /** * Checks if config sources are available */ private void checkConfigSources() { if (configSourceSet == null || configSourceSet.getSources() == null || configSourceSet.getSources().size() == 0) { log.log(LogLevel.WARNING, "No config sources defined, could not check connection"); } else { Request req = new Request("ping"); for (String configSource : configSourceSet.getSources()) { Spec spec = new Spec(configSource); Target target = supervisor.connect(spec); target.invokeSync(req, 30.0); if (target.isValid()) { log.log(LogLevel.DEBUG, () -> "Created connection to config source at " + spec.toString()); return; } else { log.log(LogLevel.INFO, "Could not connect to config source at " + spec.toString()); } target.close(); } String extra = ""; log.log(LogLevel.INFO, "Could not connect to any config 
source in set " + configSourceSet.toString() + ", please make sure config server(s) are running. " + extra); } } /** * Retrieves the requested config from the cache or the remote server. * <p> * If the requested config is different from the one in cache, the cached request is returned immediately. * If they are equal, this method returns null. * <p> * If the config was not in cache, this method starts a <em>Subscriber</em> in a separate thread * that gets the config and calls updateSubscribers(). * * @param input The config to retrieve - can be empty (no payload), or have a valid payload. * @return A Config with a payload. */ @Override public RawConfig getConfig(RawConfig input, JRTServerConfigRequest request) { DelayedResponse delayedResponse = new DelayedResponse(request); delayedResponses.add(delayedResponse); final ConfigCacheKey configCacheKey = new ConfigCacheKey(input.getKey(), input.getDefMd5()); RawConfig cachedConfig = memoryCache.get(configCacheKey); boolean needToGetConfig = true; RawConfig ret = null; if (cachedConfig != null) { log.log(LogLevel.DEBUG, () -> "Found config " + configCacheKey + " in cache, generation=" + cachedConfig.getGeneration() + ",configmd5=" + cachedConfig.getConfigMd5()); log.log(LogLevel.SPAM, () -> "input config=" + input + ",cached config=" + cachedConfig); if (ProxyServer.configOrGenerationHasChanged(cachedConfig, request)) { log.log(LogLevel.SPAM, () -> "Cached config is not equal to requested, will return it"); if (delayedResponses.remove(delayedResponse)) { ret = cachedConfig; } } if (!cachedConfig.isError() && cachedConfig.getGeneration() > 0) { needToGetConfig = false; } } if (needToGetConfig) { subscribeToConfig(input, configCacheKey); } return ret; } private void subscribeToConfig(RawConfig input, ConfigCacheKey configCacheKey) { synchronized (activeSubscribersLock) { if (activeSubscribers.containsKey(configCacheKey)) { log.log(LogLevel.DEBUG, () -> "Already a subscriber running for: " + configCacheKey); } else { 
log.log(LogLevel.DEBUG, () -> "Could not find good config in cache, creating subscriber for: " + configCacheKey); UpstreamConfigSubscriber subscriber = new UpstreamConfigSubscriber(input, this, configSourceSet, timingValues, requester, memoryCache); try { subscriber.subscribe(); activeSubscribers.put(configCacheKey, subscriber); exec.execute(subscriber); } catch (ConfigurationRuntimeException e) { log.log(LogLevel.INFO, "Subscribe for '" + configCacheKey + "' failed, closing subscriber"); subscriber.cancel(); } } } } @Override public void cancel() { shutdownSourceConnections(); } /** * Takes down connection(s) to config sources and running tasks */ @Override @Override public String getActiveSourceConnection() { if (requester != null) { return requester.getConnectionPool().getCurrent().getAddress(); } else { return ""; } } @Override public List<String> getSourceConnections() { ArrayList<String> ret = new ArrayList<>(); if (requester != null) { ret.addAll(configSourceSet.getSources()); } return ret; } /** * This method will be called when a response with changed config is received from upstream * (content or generation has changed) or the server timeout has elapsed. 
* * @param config new config */ public void updateSubscribers(RawConfig config) { log.log(LogLevel.DEBUG, () -> "Config updated for " + config.getKey() + "," + config.getGeneration()); DelayQueue<DelayedResponse> responseDelayQueue = delayedResponses.responses(); log.log(LogLevel.SPAM, () -> "Delayed response queue: " + responseDelayQueue); if (responseDelayQueue.size() == 0) { log.log(LogLevel.DEBUG, () -> "There exists no matching element on delayed response queue for " + config.getKey()); return; } else { log.log(LogLevel.DEBUG, () -> "Delayed response queue has " + responseDelayQueue.size() + " elements"); } boolean found = false; for (DelayedResponse response : responseDelayQueue.toArray(new DelayedResponse[0])) { JRTServerConfigRequest request = response.getRequest(); if (request.getConfigKey().equals(config.getKey()) && (config.getGeneration() >= request.getRequestGeneration() || config.getGeneration() == 0)) { if (delayedResponses.remove(response)) { found = true; log.log(LogLevel.DEBUG, () -> "Call returnOkResponse for " + config.getKey() + "," + config.getGeneration()); rpcServer.returnOkResponse(request, config); } else { log.log(LogLevel.INFO, "Could not remove " + config.getKey() + " from delayedResponses queue, already removed"); } } } if (!found) { log.log(LogLevel.DEBUG, () -> "Found no recipient for " + config.getKey() + " in delayed response queue"); } log.log(LogLevel.DEBUG, () -> "Finished updating config for " + config.getKey() + "," + config.getGeneration()); } }
class RpcConfigSourceClient implements ConfigSourceClient { private final static Logger log = Logger.getLogger(RpcConfigSourceClient.class.getName()); private final Supervisor supervisor = new Supervisor(new Transport()); private final RpcServer rpcServer; private final ConfigSourceSet configSourceSet; private final HashMap<ConfigCacheKey, Subscriber> activeSubscribers = new HashMap<>(); private final Object activeSubscribersLock = new Object(); private final MemoryCache memoryCache; private final DelayedResponses delayedResponses; private final TimingValues timingValues; private final ExecutorService exec; private final JRTConfigRequester requester; RpcConfigSourceClient(RpcServer rpcServer, ConfigSourceSet configSourceSet, MemoryCache memoryCache, TimingValues timingValues, DelayedResponses delayedResponses) { this.rpcServer = rpcServer; this.configSourceSet = configSourceSet; this.memoryCache = memoryCache; this.delayedResponses = delayedResponses; this.timingValues = timingValues; checkConfigSources(); exec = Executors.newCachedThreadPool(new DaemonThreadFactory("subscriber-")); requester = new JRTConfigRequester(new JRTConnectionPool(configSourceSet), timingValues); } /** * Checks if config sources are available */ private void checkConfigSources() { if (configSourceSet == null || configSourceSet.getSources() == null || configSourceSet.getSources().size() == 0) { log.log(LogLevel.WARNING, "No config sources defined, could not check connection"); } else { Request req = new Request("ping"); for (String configSource : configSourceSet.getSources()) { Spec spec = new Spec(configSource); Target target = supervisor.connect(spec); target.invokeSync(req, 30.0); if (target.isValid()) { log.log(LogLevel.DEBUG, () -> "Created connection to config source at " + spec.toString()); return; } else { log.log(LogLevel.INFO, "Could not connect to config source at " + spec.toString()); } target.close(); } String extra = ""; log.log(LogLevel.INFO, "Could not connect to any config 
source in set " + configSourceSet.toString() + ", please make sure config server(s) are running. " + extra); } } /** * Retrieves the requested config from the cache or the remote server. * <p> * If the requested config is different from the one in cache, the cached request is returned immediately. * If they are equal, this method returns null. * <p> * If the config was not in cache, this method starts a <em>Subscriber</em> in a separate thread * that gets the config and calls updateSubscribers(). * * @param input The config to retrieve - can be empty (no payload), or have a valid payload. * @return A Config with a payload. */ @Override public RawConfig getConfig(RawConfig input, JRTServerConfigRequest request) { DelayedResponse delayedResponse = new DelayedResponse(request); delayedResponses.add(delayedResponse); final ConfigCacheKey configCacheKey = new ConfigCacheKey(input.getKey(), input.getDefMd5()); RawConfig cachedConfig = memoryCache.get(configCacheKey); boolean needToGetConfig = true; RawConfig ret = null; if (cachedConfig != null) { log.log(LogLevel.DEBUG, () -> "Found config " + configCacheKey + " in cache, generation=" + cachedConfig.getGeneration() + ",configmd5=" + cachedConfig.getConfigMd5()); log.log(LogLevel.SPAM, () -> "input config=" + input + ",cached config=" + cachedConfig); if (ProxyServer.configOrGenerationHasChanged(cachedConfig, request)) { log.log(LogLevel.SPAM, () -> "Cached config is not equal to requested, will return it"); if (delayedResponses.remove(delayedResponse)) { ret = cachedConfig; } } if (!cachedConfig.isError() && cachedConfig.getGeneration() > 0) { needToGetConfig = false; } } if (needToGetConfig) { subscribeToConfig(input, configCacheKey); } return ret; } private void subscribeToConfig(RawConfig input, ConfigCacheKey configCacheKey) { synchronized (activeSubscribersLock) { if (activeSubscribers.containsKey(configCacheKey)) { log.log(LogLevel.DEBUG, () -> "Already a subscriber running for: " + configCacheKey); } else { 
log.log(LogLevel.DEBUG, () -> "Could not find good config in cache, creating subscriber for: " + configCacheKey); UpstreamConfigSubscriber subscriber = new UpstreamConfigSubscriber(input, this, configSourceSet, timingValues, requester, memoryCache); try { subscriber.subscribe(); activeSubscribers.put(configCacheKey, subscriber); exec.execute(subscriber); } catch (ConfigurationRuntimeException e) { log.log(LogLevel.INFO, "Subscribe for '" + configCacheKey + "' failed, closing subscriber"); subscriber.cancel(); } } } } @Override public void cancel() { shutdownSourceConnections(); } /** * Takes down connection(s) to config sources and running tasks */ @Override @Override public String getActiveSourceConnection() { return requester.getConnectionPool().getCurrent().getAddress(); } @Override public List<String> getSourceConnections() { return new ArrayList<>(configSourceSet.getSources()); } /** * This method will be called when a response with changed config is received from upstream * (content or generation has changed) or the server timeout has elapsed. 
* * @param config new config */ public void updateSubscribers(RawConfig config) { log.log(LogLevel.DEBUG, () -> "Config updated for " + config.getKey() + "," + config.getGeneration()); DelayQueue<DelayedResponse> responseDelayQueue = delayedResponses.responses(); log.log(LogLevel.SPAM, () -> "Delayed response queue: " + responseDelayQueue); if (responseDelayQueue.size() == 0) { log.log(LogLevel.DEBUG, () -> "There exists no matching element on delayed response queue for " + config.getKey()); return; } else { log.log(LogLevel.DEBUG, () -> "Delayed response queue has " + responseDelayQueue.size() + " elements"); } boolean found = false; for (DelayedResponse response : responseDelayQueue.toArray(new DelayedResponse[0])) { JRTServerConfigRequest request = response.getRequest(); if (request.getConfigKey().equals(config.getKey()) && (config.getGeneration() >= request.getRequestGeneration() || config.getGeneration() == 0)) { if (delayedResponses.remove(response)) { found = true; log.log(LogLevel.DEBUG, () -> "Call returnOkResponse for " + config.getKey() + "," + config.getGeneration()); rpcServer.returnOkResponse(request, config); } else { log.log(LogLevel.INFO, "Could not remove " + config.getKey() + " from delayedResponses queue, already removed"); } } } if (!found) { log.log(LogLevel.DEBUG, () -> "Found no recipient for " + config.getKey() + " in delayed response queue"); } log.log(LogLevel.DEBUG, () -> "Finished updating config for " + config.getKey() + "," + config.getGeneration()); } }
Good point — they would not get the node metrics. Let's put this on hold until stream is on 7.
/**
 * Serializes the given metrics to a secret-agent report and pushes it to the metrics
 * proxy inside the container by invoking its setExtraMetrics RPC via vespa-rpc-invoke.
 *
 * Fix: the RPC port was hard-coded to 19095, but on Vespa 6 the metrics proxy listens
 * on 19091. The port is now chosen from the node's current Vespa version, defaulting
 * to the new port (19095) when the version is unknown.
 *
 * @param context node agent context for the container
 * @param metrics metrics to serialize and push
 */
private void pushMetricsToContainer(NodeAgentContext context, List<DimensionMetrics> metrics) {
    StringBuilder params = new StringBuilder();
    try {
        for (DimensionMetrics dimensionMetrics : metrics) {
            params.append(dimensionMetrics.toSecretAgentReport());
        }
        String wrappedMetrics = "s:" + params.toString();

        // Vespa 6 runs the metrics proxy on port 19091; 19095 is used from Vespa 7.
        int port = context.node().getVespaVersion().map(version -> version.getMajor() == 6).orElse(false) ? 19091 : 19095;
        // Invoke with a 2 second RPC timeout, and a 5 second exec timeout overall.
        String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:" + port, "setExtraMetrics", wrappedMetrics};
        dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
    } catch (JsonProcessingException | DockerExecTimeoutException e) {
        context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
    }
}
// Invokes the setExtraMetrics RPC on the container's metrics proxy at port 19095
// with a 2 second timeout; wrappedMetrics is the "s:"-prefixed serialized report.
// NOTE(review): 19095 is hard-coded here — Vespa 6 nodes run the proxy on 19091.
String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19095", "setExtraMetrics", wrappedMetrics};
/**
 * Pushes the given metrics to the metrics proxy running inside the container by
 * invoking its setExtraMetrics RPC with vespa-rpc-invoke, serializing the metrics
 * to a single secret-agent report first.
 *
 * @param context node agent context for the container
 * @param metrics metrics to serialize and push
 */
private void pushMetricsToContainer(NodeAgentContext context, List<DimensionMetrics> metrics) {
    StringBuilder report = new StringBuilder();
    try {
        for (DimensionMetrics metric : metrics) {
            report.append(metric.toSecretAgentReport());
        }
        String wrappedMetrics = "s:" + report.toString();

        // The metrics proxy RPC port is 19091 on Vespa 6 and 19095 from Vespa 7;
        // an absent version is treated as the new port.
        boolean onVespa6 = context.node().getVespaVersion().map(version -> version.getMajor() == 6).orElse(false);
        int port = onVespa6 ? 19091 : 19095;

        String[] command = new String[] {
                "vespa-rpc-invoke", "-t", "2", "tcp/localhost:" + port, "setExtraMetrics", wrappedMetrics };
        dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
    } catch (JsonProcessingException | DockerExecTimeoutException e) {
        context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
    }
}
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean hasResumedNode = false; private boolean hasStartedServices = true; private final NodeAgentContextSupplier contextSupplier; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final Optional<CredentialsMaintainer> credentialsMaintainer; private final Optional<AclMaintainer> aclMaintainer; private final Optional<HealthChecker> healthChecker; private final DoubleFlag containerCpuCap; private int numberOfUnhandledException = 0; private DockerImage imageBeingDownloaded = null; private long currentRebootGeneration = 0; private Optional<Long> currentRestartGeneration = Optional.empty(); private final Thread loopThread; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeSpec lastNode = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final NodeAgentContextSupplier contextSupplier, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final FlagSource flagSource, final Optional<CredentialsMaintainer> credentialsMaintainer, final Optional<AclMaintainer> aclMaintainer, final Optional<HealthChecker> healthChecker) { this.contextSupplier = contextSupplier; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.credentialsMaintainer = credentialsMaintainer; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource) .with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname()); this.loopThread = new Thread(() -> { while (!terminated.get()) { try { NodeAgentContext context = contextSupplier.nextContext(); converge(context); } catch (InterruptedException ignored) { } } }); this.loopThread.setName("tick-" + contextSupplier.currentContext().hostname()); } @Override public void start() { loopThread.start(); } @Override public void stopForRemoval() { if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } contextSupplier.interrupt(); do { try { loopThread.join(); } catch (InterruptedException ignored) { } } while (loopThread.isAlive()); contextSupplier.currentContext().log(logger, "Stopped"); } void startServicesIfNeeded(NodeAgentContext context) { if (!hasStartedServices) { context.log(logger, "Starting services"); dockerOperations.startServices(context); hasStartedServices = true; } } void 
resumeNodeIfNeeded(NodeAgentContext context) { if (!hasResumedNode) { context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command"); dockerOperations.resumeNode(context); hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); if (context.node().getWantedRestartGeneration().isPresent() && !Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) { currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration()); newNodeAttributes.withRestartGeneration(currentRestartGeneration); } if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) { currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration()); newNodeAttributes.withRebootGeneration(currentRebootGeneration); } Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) { DockerImage currentImage = context.node().getCurrentDockerImage().orElse(DockerImage.EMPTY); DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY); currentNodeAttributes.withDockerImage(currentImage); currentNodeAttributes.withVespaVersion(currentImage.tagAsVersion()); newNodeAttributes.withDockerImage(newImage); newNodeAttributes.withVespaVersion(newImage.tagAsVersion()); } publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) { if (!currentAttributes.equals(newAttributes)) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentAttributes, newAttributes); 
nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes); } } private void startContainer(NodeAgentContext context) { ContainerData containerData = createContainerData(context); dockerOperations.createContainer(context, containerData, getContainerResources(context)); dockerOperations.startContainer(context); lastCpuMetric = new CpuUsageReporter(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState( NodeAgentContext context, Optional<Container> existingContainer) { if (existingContainer.isPresent()) { Optional<String> reason = shouldRemoveContainer(context, existingContainer.get()); if (reason.isPresent()) { removeContainer(context, existingContainer.get(), reason.get(), false); return Optional.empty(); } shouldRestartServices(context.node()).ifPresent(restartReason -> { context.log(logger, "Will restart services: " + restartReason); restartServices(context, existingContainer.get()); currentRestartGeneration = context.node().getWantedRestartGeneration(); }); } return existingContainer; } private Optional<String> shouldRestartServices(NodeSpec node) { if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty(); if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get()); } return Optional.empty(); } private void restartServices(NodeAgentContext context, Container existingContainer) { if (existingContainer.state.isRunning() && context.node().getState() == NodeState.active) { context.log(logger, "Restarting services"); orchestratorSuspendNode(context); dockerOperations.restartVespa(context); } } private void stopServices() { NodeAgentContext context = 
contextSupplier.currentContext(); context.log(logger, "Stopping services"); if (containerState == ABSENT) return; try { hasStartedServices = hasResumedNode = false; dockerOperations.stopServices(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } } @Override public void stopForHostSuspension() { NodeAgentContext context = contextSupplier.currentContext(); getContainer(context).ifPresent(container -> removeContainer(context, container, "suspending host", true)); } public void suspend() { NodeAgentContext context = contextSupplier.currentContext(); context.log(logger, "Suspending services on node"); if (containerState == ABSENT) return; try { hasResumedNode = false; dockerOperations.suspendNode(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } catch (RuntimeException e) { context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e); } } private Optional<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) { final NodeState nodeState = context.node().getState(); if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (context.node().getWantedDockerImage().isPresent() && !context.node().getWantedDockerImage().get().equals(existingContainer.image)) { return Optional.of("The node is supposed to run a new Docker image: " + existingContainer.image.asString() + " -> " + context.node().getWantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } if (currentRebootGeneration < context.node().getWantedRebootGeneration()) { return Optional.of(String.format("Container reboot wanted. 
Current: %d, Wanted: %d", currentRebootGeneration, context.node().getWantedRebootGeneration())); } ContainerResources wantedContainerResources = getContainerResources(context); if (!wantedContainerResources.equalsMemory(existingContainer.resources)) { return Optional.of("Container should be running with different memory allocation, wanted: " + wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources.toStringMemory()); } if (containerState == STARTING) return Optional.of("Container failed to start"); return Optional.empty(); } private void removeContainer(NodeAgentContext context, Container existingContainer, String reason, boolean alreadySuspended) { context.log(logger, "Will remove container: " + reason); if (existingContainer.state.isRunning()) { if (!alreadySuspended) { orchestratorSuspendNode(context); } try { if (context.node().getState() != NodeState.dirty) { suspend(); } stopServices(); } catch (Exception e) { context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e); } } storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer)); dockerOperations.removeContainer(context, existingContainer); currentRebootGeneration = context.node().getWantedRebootGeneration(); containerState = ABSENT; context.log(logger, "Container successfully removed, new containerState is " + containerState); } private void updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) { ContainerResources wantedContainerResources = getContainerResources(context); if (wantedContainerResources.equalsCpu(existingContainer.resources)) return; context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s", wantedContainerResources.toStringCpu(), existingContainer.resources.toStringCpu()); orchestratorSuspendNode(context); dockerOperations.updateContainer(context, wantedContainerResources); } private ContainerResources getContainerResources(NodeAgentContext 
context) { double cpuCap = noCpuCap(context.zone()) ? 0 : context.node().getOwner() .map(NodeOwner::asApplicationId) .map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm())) .orElse(containerCpuCap) .value() * context.node().getMinCpuCores(); return ContainerResources.from(cpuCap, context.node().getMinCpuCores(), context.node().getMinMainMemoryAvailableGb()); } private boolean noCpuCap(ZoneApi zone) { return zone.getEnvironment() == Environment.dev || (zone.getSystemName().isCd() && zone.getEnvironment() != Environment.prod); } private void scheduleDownLoadIfNeeded(NodeSpec node, Optional<Container> container) { if (node.getWantedDockerImage().equals(container.map(c -> c.image))) return; if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) { imageBeingDownloaded = node.getWantedDockerImage().get(); } else if (imageBeingDownloaded != null) { imageBeingDownloaded = null; } } public void converge(NodeAgentContext context) { try { doConverge(context); } catch (ConvergenceException e) { context.log(logger, e.getMessage()); } catch (ContainerNotFoundException e) { containerState = ABSENT; context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState); } catch (DockerException e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Caught a DockerException", e); } catch (Throwable e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring", e); } } void doConverge(NodeAgentContext context) { NodeSpec node = context.node(); Optional<Container> container = getContainer(context); if (!node.equals(lastNode)) { logChangesToNodeSpec(context, lastNode, node); if (currentRebootGeneration < node.getCurrentRebootGeneration()) currentRebootGeneration = node.getCurrentRebootGeneration(); if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() || 
currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false)) currentRestartGeneration = node.getCurrentRestartGeneration(); if (container.map(c -> c.state.isRunning()).orElse(false)) { storageMaintainer.writeMetricsConfig(context); } lastNode = node; } switch (node.getState()) { case ready: case reserved: case parked: case failed: case inactive: removeContainerIfNeededUpdateContainerState(context, container); updateNodeRepoWithCurrentAttributes(context); break; case active: storageMaintainer.handleCoreDumpsForContainer(context, container); storageMaintainer.getDiskUsageFor(context) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb()) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context)); scheduleDownLoadIfNeeded(node, container); if (isDownloadingImage()) { context.log(logger, "Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(context, container); credentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context)); if (! 
container.isPresent()) { containerState = STARTING; startContainer(context); containerState = UNKNOWN; } else { updateContainerIfNeeded(context, container.get()); } aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); startServicesIfNeeded(context); resumeNodeIfNeeded(context); healthChecker.ifPresent(checker -> checker.verifyHealth(context)); updateNodeRepoWithCurrentAttributes(context); context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty); break; case dirty: removeContainerIfNeededUpdateContainerState(context, container); context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready"); credentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(context); nodeRepository.setNodeState(context.hostname().value(), NodeState.ready); break; default: throw new ConvergenceException("UNKNOWN STATE " + node.getState().name()); } } private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState); if (builder.length() > 0) { context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString()); } } private static <T> String fieldDescription(T value) { return value == null ? "[absent]" : value.toString(); } private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) { T oldValue = oldNode == null ? 
null : getter.apply(oldNode); T newValue = getter.apply(newNode); if (!Objects.equals(oldValue, newValue)) { if (builder.length() > 0) { builder.append(", "); } builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue)); } } @SuppressWarnings("unchecked") public void updateContainerNodeMetrics() { if (containerState != UNKNOWN) return; final NodeAgentContext context = contextSupplier.currentContext(); final NodeSpec node = context.node(); Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context); if (!containerStats.isPresent()) return; Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", context.hostname().value()) .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType())) .add("state", node.getState().toString()); node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent)); node.getAllowedToBeDown().ifPresent(allowed -> dimensionsBuilder.add("orchestratorState", allowed ? 
"ALLOWED_TO_BE_DOWN" : "NO_REMARKS")); Dimensions dimensions = dimensionsBuilder.build(); ContainerStats stats = containerStats.get(); final String APP = MetricReceiverWrapper.APPLICATION_NODE; final int totalNumCpuCores = stats.getCpuStats().getOnlineCpus(); final long cpuContainerKernelTime = stats.getCpuStats().getUsageInKernelMode(); final long cpuContainerTotalTime = stats.getCpuStats().getTotalUsage(); final long cpuSystemTotalTime = stats.getCpuStats().getSystemCpuUsage(); final long memoryTotalBytes = stats.getMemoryStats().getLimit(); final long memoryTotalBytesUsage = stats.getMemoryStats().getUsage(); final long memoryTotalBytesCache = stats.getMemoryStats().getCache(); final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB); final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores; double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio; double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio; long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes; double memoryTotalUsageRatio = (double) memoryTotalBytesUsage / memoryTotalBytes; Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes); List<DimensionMetrics> metrics = new ArrayList<>(); DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions) .withMetric("mem.limit", memoryTotalBytes) .withMetric("mem.used", memoryTotalBytesUsed) .withMetric("mem.util", 100 * memoryUsageRatio) .withMetric("mem_total.used", memoryTotalBytesUsage) .withMetric("mem_total.util", 100 * memoryTotalUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) 
.withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) .withMetric("cpu.vcpus", node.getMinCpuCores()) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio)); metrics.add(systemMetricsBuilder.build()); stats.getNetworks().forEach((interfaceName, interfaceStats) -> { Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims) .withMetric("net.in.bytes", interfaceStats.getRxBytes()) .withMetric("net.in.errors", interfaceStats.getRxErrors()) .withMetric("net.in.dropped", interfaceStats.getRxDropped()) .withMetric("net.out.bytes", interfaceStats.getTxBytes()) .withMetric("net.out.errors", interfaceStats.getTxErrors()) .withMetric("net.out.dropped", interfaceStats.getTxDropped()) .build(); metrics.add(networkMetrics); }); pushMetricsToContainer(context, metrics); } private Optional<Container> getContainer(NodeAgentContext context) { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = dockerOperations.getContainer(context); if (! container.isPresent()) containerState = ABSENT; return container; } @Override public boolean isDownloadingImage() { return imageBeingDownloaded != null; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 
0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } } private void orchestratorSuspendNode(NodeAgentContext context) { if (context.node().getState() != NodeState.active) return; context.log(logger, "Ask Orchestrator for permission to suspend node"); try { orchestrator.suspend(context.hostname().value()); } catch (OrchestratorException e) { try { aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); } catch (RuntimeException suppressed) { logger.log(LogLevel.WARNING, "Suppressing ACL update failure: " + suppressed); e.addSuppressed(suppressed); } throw e; } } protected ContainerData createContainerData(NodeAgentContext context) { return new ContainerData() { @Override public void addFile(Path pathInContainer, String data) { throw new UnsupportedOperationException("addFile not implemented"); } @Override public void createSymlink(Path symlink, Path target) { throw new UnsupportedOperationException("createSymlink not implemented"); } }; } }
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean hasResumedNode = false; private boolean hasStartedServices = true; private final NodeAgentContextSupplier contextSupplier; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final Optional<CredentialsMaintainer> credentialsMaintainer; private final Optional<AclMaintainer> aclMaintainer; private final Optional<HealthChecker> healthChecker; private final DoubleFlag containerCpuCap; private int numberOfUnhandledException = 0; private DockerImage imageBeingDownloaded = null; private long currentRebootGeneration = 0; private Optional<Long> currentRestartGeneration = Optional.empty(); private final Thread loopThread; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeSpec lastNode = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final NodeAgentContextSupplier contextSupplier, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final FlagSource flagSource, final Optional<CredentialsMaintainer> credentialsMaintainer, final Optional<AclMaintainer> aclMaintainer, final Optional<HealthChecker> healthChecker) { this.contextSupplier = contextSupplier; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.credentialsMaintainer = credentialsMaintainer; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource) .with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname()); this.loopThread = new Thread(() -> { while (!terminated.get()) { try { NodeAgentContext context = contextSupplier.nextContext(); converge(context); } catch (InterruptedException ignored) { } } }); this.loopThread.setName("tick-" + contextSupplier.currentContext().hostname()); } @Override public void start() { loopThread.start(); } @Override public void stopForRemoval() { if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } contextSupplier.interrupt(); do { try { loopThread.join(); } catch (InterruptedException ignored) { } } while (loopThread.isAlive()); contextSupplier.currentContext().log(logger, "Stopped"); } void startServicesIfNeeded(NodeAgentContext context) { if (!hasStartedServices) { context.log(logger, "Starting services"); dockerOperations.startServices(context); hasStartedServices = true; } } void 
resumeNodeIfNeeded(NodeAgentContext context) { if (!hasResumedNode) { context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command"); dockerOperations.resumeNode(context); hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(NodeAgentContext context) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); if (context.node().getWantedRestartGeneration().isPresent() && !Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) { currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration()); newNodeAttributes.withRestartGeneration(currentRestartGeneration); } if (!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) { currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration()); newNodeAttributes.withRebootGeneration(currentRebootGeneration); } Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) { DockerImage currentImage = context.node().getCurrentDockerImage().orElse(DockerImage.EMPTY); DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY); currentNodeAttributes.withDockerImage(currentImage); currentNodeAttributes.withVespaVersion(currentImage.tagAsVersion()); newNodeAttributes.withDockerImage(newImage); newNodeAttributes.withVespaVersion(newImage.tagAsVersion()); } publishStateToNodeRepoIfChanged(context, currentNodeAttributes, newNodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAgentContext context, NodeAttributes currentAttributes, NodeAttributes newAttributes) { if (!currentAttributes.equals(newAttributes)) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentAttributes, newAttributes); 
nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes); } } private void startContainer(NodeAgentContext context) { ContainerData containerData = createContainerData(context); dockerOperations.createContainer(context, containerData, getContainerResources(context)); dockerOperations.startContainer(context); lastCpuMetric = new CpuUsageReporter(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState( NodeAgentContext context, Optional<Container> existingContainer) { if (existingContainer.isPresent()) { Optional<String> reason = shouldRemoveContainer(context, existingContainer.get()); if (reason.isPresent()) { removeContainer(context, existingContainer.get(), reason.get(), false); return Optional.empty(); } shouldRestartServices(context.node()).ifPresent(restartReason -> { context.log(logger, "Will restart services: " + restartReason); restartServices(context, existingContainer.get()); currentRestartGeneration = context.node().getWantedRestartGeneration(); }); } return existingContainer; } private Optional<String> shouldRestartServices(NodeSpec node) { if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty(); if (currentRestartGeneration.get() < node.getWantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get()); } return Optional.empty(); } private void restartServices(NodeAgentContext context, Container existingContainer) { if (existingContainer.state.isRunning() && context.node().getState() == NodeState.active) { context.log(logger, "Restarting services"); orchestratorSuspendNode(context); dockerOperations.restartVespa(context); } } private void stopServices() { NodeAgentContext context = 
contextSupplier.currentContext(); context.log(logger, "Stopping services"); if (containerState == ABSENT) return; try { hasStartedServices = hasResumedNode = false; dockerOperations.stopServices(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } } @Override public void stopForHostSuspension() { NodeAgentContext context = contextSupplier.currentContext(); getContainer(context).ifPresent(container -> removeContainer(context, container, "suspending host", true)); } public void suspend() { NodeAgentContext context = contextSupplier.currentContext(); context.log(logger, "Suspending services on node"); if (containerState == ABSENT) return; try { hasResumedNode = false; dockerOperations.suspendNode(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } catch (RuntimeException e) { context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e); } } private Optional<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) { final NodeState nodeState = context.node().getState(); if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (context.node().getWantedDockerImage().isPresent() && !context.node().getWantedDockerImage().get().equals(existingContainer.image)) { return Optional.of("The node is supposed to run a new Docker image: " + existingContainer.image.asString() + " -> " + context.node().getWantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } if (currentRebootGeneration < context.node().getWantedRebootGeneration()) { return Optional.of(String.format("Container reboot wanted. 
Current: %d, Wanted: %d", currentRebootGeneration, context.node().getWantedRebootGeneration())); } ContainerResources wantedContainerResources = getContainerResources(context); if (!wantedContainerResources.equalsMemory(existingContainer.resources)) { return Optional.of("Container should be running with different memory allocation, wanted: " + wantedContainerResources.toStringMemory() + ", actual: " + existingContainer.resources.toStringMemory()); } if (containerState == STARTING) return Optional.of("Container failed to start"); return Optional.empty(); } private void removeContainer(NodeAgentContext context, Container existingContainer, String reason, boolean alreadySuspended) { context.log(logger, "Will remove container: " + reason); if (existingContainer.state.isRunning()) { if (!alreadySuspended) { orchestratorSuspendNode(context); } try { if (context.node().getState() != NodeState.dirty) { suspend(); } stopServices(); } catch (Exception e) { context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e); } } storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer)); dockerOperations.removeContainer(context, existingContainer); currentRebootGeneration = context.node().getWantedRebootGeneration(); containerState = ABSENT; context.log(logger, "Container successfully removed, new containerState is " + containerState); } private void updateContainerIfNeeded(NodeAgentContext context, Container existingContainer) { ContainerResources wantedContainerResources = getContainerResources(context); if (wantedContainerResources.equalsCpu(existingContainer.resources)) return; context.log(logger, "Container should be running with different CPU allocation, wanted: %s, current: %s", wantedContainerResources.toStringCpu(), existingContainer.resources.toStringCpu()); orchestratorSuspendNode(context); dockerOperations.updateContainer(context, wantedContainerResources); } private ContainerResources getContainerResources(NodeAgentContext 
context) { double cpuCap = noCpuCap(context.zone()) ? 0 : context.node().getOwner() .map(NodeOwner::asApplicationId) .map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm())) .orElse(containerCpuCap) .value() * context.node().getMinCpuCores(); return ContainerResources.from(cpuCap, context.node().getMinCpuCores(), context.node().getMinMainMemoryAvailableGb()); } private boolean noCpuCap(ZoneApi zone) { return zone.getEnvironment() == Environment.dev || (zone.getSystemName().isCd() && zone.getEnvironment() != Environment.prod); } private void scheduleDownLoadIfNeeded(NodeSpec node, Optional<Container> container) { if (node.getWantedDockerImage().equals(container.map(c -> c.image))) return; if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) { imageBeingDownloaded = node.getWantedDockerImage().get(); } else if (imageBeingDownloaded != null) { imageBeingDownloaded = null; } } public void converge(NodeAgentContext context) { try { doConverge(context); } catch (ConvergenceException e) { context.log(logger, e.getMessage()); } catch (ContainerNotFoundException e) { containerState = ABSENT; context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState); } catch (DockerException e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Caught a DockerException", e); } catch (Throwable e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring", e); } } void doConverge(NodeAgentContext context) { NodeSpec node = context.node(); Optional<Container> container = getContainer(context); if (!node.equals(lastNode)) { logChangesToNodeSpec(context, lastNode, node); if (currentRebootGeneration < node.getCurrentRebootGeneration()) currentRebootGeneration = node.getCurrentRebootGeneration(); if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() || 
currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false)) currentRestartGeneration = node.getCurrentRestartGeneration(); if (container.map(c -> c.state.isRunning()).orElse(false)) { storageMaintainer.writeMetricsConfig(context); } lastNode = node; } switch (node.getState()) { case ready: case reserved: case parked: case failed: case inactive: removeContainerIfNeededUpdateContainerState(context, container); updateNodeRepoWithCurrentAttributes(context); break; case active: storageMaintainer.handleCoreDumpsForContainer(context, container); storageMaintainer.getDiskUsageFor(context) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb()) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context)); scheduleDownLoadIfNeeded(node, container); if (isDownloadingImage()) { context.log(logger, "Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(context, container); credentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context)); if (! 
container.isPresent()) { containerState = STARTING; startContainer(context); containerState = UNKNOWN; } else { updateContainerIfNeeded(context, container.get()); } aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); startServicesIfNeeded(context); resumeNodeIfNeeded(context); healthChecker.ifPresent(checker -> checker.verifyHealth(context)); updateNodeRepoWithCurrentAttributes(context); context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty); break; case dirty: removeContainerIfNeededUpdateContainerState(context, container); context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready"); credentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(context); nodeRepository.setNodeState(context.hostname().value(), NodeState.ready); break; default: throw new ConvergenceException("UNKNOWN STATE " + node.getState().name()); } } private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState); if (builder.length() > 0) { context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString()); } } private static <T> String fieldDescription(T value) { return value == null ? "[absent]" : value.toString(); } private static <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) { T oldValue = oldNode == null ? 
null : getter.apply(oldNode); T newValue = getter.apply(newNode); if (!Objects.equals(oldValue, newValue)) { if (builder.length() > 0) { builder.append(", "); } builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue)); } } @SuppressWarnings("unchecked") public void updateContainerNodeMetrics() { if (containerState != UNKNOWN) return; final NodeAgentContext context = contextSupplier.currentContext(); final NodeSpec node = context.node(); Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context); if (!containerStats.isPresent()) return; Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", context.hostname().value()) .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType())) .add("state", node.getState().toString()); node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent)); node.getAllowedToBeDown().ifPresent(allowed -> dimensionsBuilder.add("orchestratorState", allowed ? 
"ALLOWED_TO_BE_DOWN" : "NO_REMARKS")); Dimensions dimensions = dimensionsBuilder.build(); ContainerStats stats = containerStats.get(); final String APP = MetricReceiverWrapper.APPLICATION_NODE; final int totalNumCpuCores = stats.getCpuStats().getOnlineCpus(); final long cpuContainerKernelTime = stats.getCpuStats().getUsageInKernelMode(); final long cpuContainerTotalTime = stats.getCpuStats().getTotalUsage(); final long cpuSystemTotalTime = stats.getCpuStats().getSystemCpuUsage(); final long memoryTotalBytes = stats.getMemoryStats().getLimit(); final long memoryTotalBytesUsage = stats.getMemoryStats().getUsage(); final long memoryTotalBytesCache = stats.getMemoryStats().getCache(); final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB); final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores; double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio; double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio; long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes; double memoryTotalUsageRatio = (double) memoryTotalBytesUsage / memoryTotalBytes; Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes); List<DimensionMetrics> metrics = new ArrayList<>(); DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions) .withMetric("mem.limit", memoryTotalBytes) .withMetric("mem.used", memoryTotalBytesUsed) .withMetric("mem.util", 100 * memoryUsageRatio) .withMetric("mem_total.used", memoryTotalBytesUsage) .withMetric("mem_total.util", 100 * memoryTotalUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) 
.withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) .withMetric("cpu.vcpus", node.getMinCpuCores()) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio)); metrics.add(systemMetricsBuilder.build()); stats.getNetworks().forEach((interfaceName, interfaceStats) -> { Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims) .withMetric("net.in.bytes", interfaceStats.getRxBytes()) .withMetric("net.in.errors", interfaceStats.getRxErrors()) .withMetric("net.in.dropped", interfaceStats.getRxDropped()) .withMetric("net.out.bytes", interfaceStats.getTxBytes()) .withMetric("net.out.errors", interfaceStats.getTxErrors()) .withMetric("net.out.dropped", interfaceStats.getTxDropped()) .build(); metrics.add(networkMetrics); }); pushMetricsToContainer(context, metrics); } private Optional<Container> getContainer(NodeAgentContext context) { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = dockerOperations.getContainer(context); if (! container.isPresent()) containerState = ABSENT; return container; } @Override public boolean isDownloadingImage() { return imageBeingDownloaded != null; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 
0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } } private void orchestratorSuspendNode(NodeAgentContext context) { if (context.node().getState() != NodeState.active) return; context.log(logger, "Ask Orchestrator for permission to suspend node"); try { orchestrator.suspend(context.hostname().value()); } catch (OrchestratorException e) { try { aclMaintainer.ifPresent(maintainer -> maintainer.converge(context)); } catch (RuntimeException suppressed) { logger.log(LogLevel.WARNING, "Suppressing ACL update failure: " + suppressed); e.addSuppressed(suppressed); } throw e; } } protected ContainerData createContainerData(NodeAgentContext context) { return new ContainerData() { @Override public void addFile(Path pathInContainer, String data) { throw new UnsupportedOperationException("addFile not implemented"); } @Override public void createSymlink(Path symlink, Path target) { throw new UnsupportedOperationException("createSymlink not implemented"); } }; } }
Since I'm already nitpicking: This should say "Has no skipped outputs" (the message describes what you're asserting).
public void testSoftmaxImport() { TestableTensorFlowModel model = new TestableTensorFlowModel("test", "src/test/models/tensorflow/softmax/saved", 1, 5); ImportedModel.Signature signature = model.get().signature("serving_default"); Assert.assertEquals("Has skipped outputs", 0, model.get().signature("serving_default").skippedOutputs().size()); ImportedMlFunction output = signature.outputFunction("y", "y"); assertNotNull(output); model.assertEqualResult("input", "output"); }
Assert.assertEquals("Has skipped outputs",
public void testSoftmaxImport() { TestableTensorFlowModel model = new TestableTensorFlowModel("test", "src/test/models/tensorflow/softmax/saved", 1, 5); ImportedModel.Signature signature = model.get().signature("serving_default"); Assert.assertEquals("Should have no skipped outputs", 0, model.get().signature("serving_default").skippedOutputs().size()); ImportedMlFunction output = signature.outputFunction("y", "y"); assertNotNull(output); model.assertEqualResult("input", "output"); }
class SoftmaxImportTestCase { @Test }
class SoftmaxImportTestCase { @Test }
This doesn't really make sense to me. This message is output when the assertion fails, and should help in figuring out what the problem is. In this case "has no skipped outputs" would just be confusing. Perhaps something more descriptive, e.g. "Should have no skipped outputs" ?
public void testSoftmaxImport() { TestableTensorFlowModel model = new TestableTensorFlowModel("test", "src/test/models/tensorflow/softmax/saved", 1, 5); ImportedModel.Signature signature = model.get().signature("serving_default"); Assert.assertEquals("Has skipped outputs", 0, model.get().signature("serving_default").skippedOutputs().size()); ImportedMlFunction output = signature.outputFunction("y", "y"); assertNotNull(output); model.assertEqualResult("input", "output"); }
Assert.assertEquals("Has skipped outputs",
public void testSoftmaxImport() { TestableTensorFlowModel model = new TestableTensorFlowModel("test", "src/test/models/tensorflow/softmax/saved", 1, 5); ImportedModel.Signature signature = model.get().signature("serving_default"); Assert.assertEquals("Should have no skipped outputs", 0, model.get().signature("serving_default").skippedOutputs().size()); ImportedMlFunction output = signature.outputFunction("y", "y"); assertNotNull(output); model.assertEqualResult("input", "output"); }
class SoftmaxImportTestCase { @Test }
class SoftmaxImportTestCase { @Test }
Throw certificatenotreadyexception ?
public void validate(VespaModel model, DeployState deployState) { if (deployState.tlsSecrets().isPresent() && deployState.tlsSecrets().get() == TlsSecrets.MISSING) { throw new IllegalArgumentException("TLS enabled, but could not retrieve certificate yet"); } }
throw new IllegalArgumentException("TLS enabled, but could not retrieve certificate yet");
public void validate(VespaModel model, DeployState deployState) { if (deployState.tlsSecrets().isPresent() && deployState.tlsSecrets().get() == TlsSecrets.MISSING) { throw new CertificateNotReadyException("TLS enabled, but could not retrieve certificate yet"); } }
class TlsSecretsValidator extends Validator { /** This check is delayed until validation to allow node provisioning to complete while we are waiting for cert */ @Override }
class TlsSecretsValidator extends Validator { /** This check is delayed until validation to allow node provisioning to complete while we are waiting for cert */ @Override }
Breaks alignment.
public LockedApplication with(List<AssignedRotation> assignedRotations) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, assignedRotations, rotationStatus); }
deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
public LockedApplication with(List<AssignedRotation> assignedRotations) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, assignedRotations, rotationStatus); }
class LockedApplication { private final Lock lock; private final ApplicationId id; private final Instant createdAt; private final DeploymentSpec deploymentSpec; private final ValidationOverrides validationOverrides; private final Map<ZoneId, Deployment> deployments; private final DeploymentJobs deploymentJobs; private final Change change; private final Change outstandingChange; private final Optional<IssueId> ownershipIssueId; private final Optional<User> owner; private final OptionalInt majorVersion; private final ApplicationMetrics metrics; private final Optional<String> pemDeployKey; private final List<AssignedRotation> rotations; private final Map<HostName, RotationStatus> rotationStatus; /** * Used to create a locked application * * @param application The application to lock. * @param lock The lock for the application. */ LockedApplication(Application application, Lock lock) { this(Objects.requireNonNull(lock, "lock cannot be null"), application.id(), application.createdAt(), application.deploymentSpec(), application.validationOverrides(), application.deployments(), application.deploymentJobs(), application.change(), application.outstandingChange(), application.ownershipIssueId(), application.owner(), application.majorVersion(), application.metrics(), application.pemDeployKey(), application.assignedRotations(), application.rotationStatus()); } private LockedApplication(Lock lock, ApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides, Map<ZoneId, Deployment> deployments, DeploymentJobs deploymentJobs, Change change, Change outstandingChange, Optional<IssueId> ownershipIssueId, Optional<User> owner, OptionalInt majorVersion, ApplicationMetrics metrics, Optional<String> pemDeployKey, List<AssignedRotation> rotations, Map<HostName, RotationStatus> rotationStatus) { this.lock = lock; this.id = id; this.createdAt = createdAt; this.deploymentSpec = deploymentSpec; this.validationOverrides = validationOverrides; 
this.deployments = deployments; this.deploymentJobs = deploymentJobs; this.change = change; this.outstandingChange = outstandingChange; this.ownershipIssueId = ownershipIssueId; this.owner = owner; this.majorVersion = majorVersion; this.metrics = metrics; this.pemDeployKey = pemDeployKey; this.rotations = rotations; this.rotationStatus = rotationStatus; } /** Returns a read-only copy of this */ public Application get() { return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withBuiltInternally(boolean builtInternally) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withBuiltInternally(builtInternally), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withProjectId(OptionalLong projectId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withProjectId(projectId), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withDeploymentIssueId(IssueId issueId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.with(issueId), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withJobPause(JobType jobType, OptionalLong pausedUntil) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withPause(jobType, pausedUntil), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public 
LockedApplication withJobCompletion(long projectId, JobType jobType, JobStatus.JobRun completion, Optional<DeploymentJobs.JobError> jobError) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withCompletion(projectId, jobType, completion, jobError), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withJobTriggering(JobType jobType, JobStatus.JobRun job) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withTriggering(jobType, job), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withNewDeployment(ZoneId zone, ApplicationVersion applicationVersion, Version version, Instant instant, Map<DeploymentMetrics.Warning, Integer> warnings) { Deployment previousDeployment = deployments.getOrDefault(zone, new Deployment(zone, applicationVersion, version, instant)); Deployment newDeployment = new Deployment(zone, applicationVersion, version, instant, previousDeployment.clusterUtils(), previousDeployment.clusterInfo(), previousDeployment.metrics().with(warnings), previousDeployment.activity()); return with(newDeployment); } public LockedApplication withClusterUtilization(ZoneId zone, Map<ClusterSpec.Id, ClusterUtilization> clusterUtilization) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withClusterUtils(clusterUtilization)); } public LockedApplication withClusterInfo(ZoneId zone, Map<ClusterSpec.Id, ClusterInfo> clusterInfo) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withClusterInfo(clusterInfo)); } public LockedApplication recordActivityAt(Instant instant, ZoneId zone) { Deployment deployment = deployments.get(zone); if (deployment 
== null) return this; return with(deployment.recordActivityAt(instant)); } public LockedApplication with(ZoneId zone, DeploymentMetrics deploymentMetrics) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withMetrics(deploymentMetrics)); } public LockedApplication withoutDeploymentIn(ZoneId zone) { Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments); deployments.remove(zone); return with(deployments); } public LockedApplication withoutDeploymentJob(JobType jobType) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.without(jobType), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(DeploymentSpec deploymentSpec) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(ValidationOverrides validationOverrides) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withChange(Change change) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withOutstandingChange(Change outstandingChange) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, 
rotationStatus); } public LockedApplication withOwnershipIssueId(IssueId issueId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, Optional.ofNullable(issueId), owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withOwner(User owner) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, Optional.ofNullable(owner), majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } /** Set a major version for this, or set to null to remove any major version override */ public LockedApplication withMajorVersion(Integer majorVersion) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion == null ? OptionalInt.empty() : OptionalInt.of(majorVersion), metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(MetricsService.ApplicationMetrics metrics) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withPemDeployKey(String pemDeployKey) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, Optional.ofNullable(pemDeployKey), rotations, rotationStatus); } public LockedApplication withRotationStatus(Map<HostName, RotationStatus> rotationStatus) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, 
metrics, pemDeployKey, rotations, rotationStatus); } /** Don't expose non-leaf sub-objects. */ private LockedApplication with(Deployment deployment) { Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments); deployments.put(deployment.zone(), deployment); return with(deployments); } private LockedApplication with(Map<ZoneId, Deployment> deployments) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } @Override public String toString() { return "application '" + id + "'"; } }
class LockedApplication { private final Lock lock; private final ApplicationId id; private final Instant createdAt; private final DeploymentSpec deploymentSpec; private final ValidationOverrides validationOverrides; private final Map<ZoneId, Deployment> deployments; private final DeploymentJobs deploymentJobs; private final Change change; private final Change outstandingChange; private final Optional<IssueId> ownershipIssueId; private final Optional<User> owner; private final OptionalInt majorVersion; private final ApplicationMetrics metrics; private final Optional<String> pemDeployKey; private final List<AssignedRotation> rotations; private final Map<HostName, RotationStatus> rotationStatus; /** * Used to create a locked application * * @param application The application to lock. * @param lock The lock for the application. */ LockedApplication(Application application, Lock lock) { this(Objects.requireNonNull(lock, "lock cannot be null"), application.id(), application.createdAt(), application.deploymentSpec(), application.validationOverrides(), application.deployments(), application.deploymentJobs(), application.change(), application.outstandingChange(), application.ownershipIssueId(), application.owner(), application.majorVersion(), application.metrics(), application.pemDeployKey(), application.assignedRotations(), application.rotationStatus()); } private LockedApplication(Lock lock, ApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides, Map<ZoneId, Deployment> deployments, DeploymentJobs deploymentJobs, Change change, Change outstandingChange, Optional<IssueId> ownershipIssueId, Optional<User> owner, OptionalInt majorVersion, ApplicationMetrics metrics, Optional<String> pemDeployKey, List<AssignedRotation> rotations, Map<HostName, RotationStatus> rotationStatus) { this.lock = lock; this.id = id; this.createdAt = createdAt; this.deploymentSpec = deploymentSpec; this.validationOverrides = validationOverrides; 
this.deployments = deployments; this.deploymentJobs = deploymentJobs; this.change = change; this.outstandingChange = outstandingChange; this.ownershipIssueId = ownershipIssueId; this.owner = owner; this.majorVersion = majorVersion; this.metrics = metrics; this.pemDeployKey = pemDeployKey; this.rotations = rotations; this.rotationStatus = rotationStatus; } /** Returns a read-only copy of this */ public Application get() { return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withBuiltInternally(boolean builtInternally) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withBuiltInternally(builtInternally), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withProjectId(OptionalLong projectId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withProjectId(projectId), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withDeploymentIssueId(IssueId issueId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.with(issueId), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withJobPause(JobType jobType, OptionalLong pausedUntil) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withPause(jobType, pausedUntil), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public 
LockedApplication withJobCompletion(long projectId, JobType jobType, JobStatus.JobRun completion, Optional<DeploymentJobs.JobError> jobError) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withCompletion(projectId, jobType, completion, jobError), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withJobTriggering(JobType jobType, JobStatus.JobRun job) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withTriggering(jobType, job), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withNewDeployment(ZoneId zone, ApplicationVersion applicationVersion, Version version, Instant instant, Map<DeploymentMetrics.Warning, Integer> warnings) { Deployment previousDeployment = deployments.getOrDefault(zone, new Deployment(zone, applicationVersion, version, instant)); Deployment newDeployment = new Deployment(zone, applicationVersion, version, instant, previousDeployment.clusterUtils(), previousDeployment.clusterInfo(), previousDeployment.metrics().with(warnings), previousDeployment.activity()); return with(newDeployment); } public LockedApplication withClusterUtilization(ZoneId zone, Map<ClusterSpec.Id, ClusterUtilization> clusterUtilization) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withClusterUtils(clusterUtilization)); } public LockedApplication withClusterInfo(ZoneId zone, Map<ClusterSpec.Id, ClusterInfo> clusterInfo) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withClusterInfo(clusterInfo)); } public LockedApplication recordActivityAt(Instant instant, ZoneId zone) { Deployment deployment = deployments.get(zone); if (deployment 
== null) return this; return with(deployment.recordActivityAt(instant)); } public LockedApplication with(ZoneId zone, DeploymentMetrics deploymentMetrics) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withMetrics(deploymentMetrics)); } public LockedApplication withoutDeploymentIn(ZoneId zone) { Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments); deployments.remove(zone); return with(deployments); } public LockedApplication withoutDeploymentJob(JobType jobType) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.without(jobType), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(DeploymentSpec deploymentSpec) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(ValidationOverrides validationOverrides) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withChange(Change change) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withOutstandingChange(Change outstandingChange) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, 
rotationStatus); } public LockedApplication withOwnershipIssueId(IssueId issueId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, Optional.ofNullable(issueId), owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withOwner(User owner) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, Optional.ofNullable(owner), majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } /** Set a major version for this, or set to null to remove any major version override */ public LockedApplication withMajorVersion(Integer majorVersion) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion == null ? OptionalInt.empty() : OptionalInt.of(majorVersion), metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(MetricsService.ApplicationMetrics metrics) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withPemDeployKey(String pemDeployKey) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, Optional.ofNullable(pemDeployKey), rotations, rotationStatus); } public LockedApplication withRotationStatus(Map<HostName, RotationStatus> rotationStatus) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, 
metrics, pemDeployKey, rotations, rotationStatus); } /** Don't expose non-leaf sub-objects. */ private LockedApplication with(Deployment deployment) { Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments); deployments.put(deployment.zone(), deployment); return with(deployments); } private LockedApplication with(Map<ZoneId, Deployment> deployments) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } @Override public String toString() { return "application '" + id + "'"; } }
I think it would be better to use the same name as the `AssignedRotation` field. E.g. "endpoint" -> "endpointId". This and the corresponding `fromSlime` method should use constants for the field names.
private void assignedRotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) { final var rotationsArray = parent.setArray(fieldName); for (var rotation : rotations) { final var object = rotationsArray.addObject(); object.setString("endpoint", rotation.endpointId().id()); object.setString("rotation", rotation.rotationId().asString()); object.setString("container", rotation.clusterId().value()); } }
object.setString("endpoint", rotation.endpointId().id());
private void assignedRotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) { final var rotationsArray = parent.setArray(fieldName); for (var rotation : rotations) { final var object = rotationsArray.addObject(); object.setString(assignedRotationEndpointField, rotation.endpointId().id()); object.setString(assignedRotationRotationField, rotation.rotationId().asString()); object.setString(assignedRotationClusterField, rotation.clusterId().value()); } }
/**
 * Serializes an {@code Application} to and from a Slime tree for persistence.
 *
 * All field names are declared as constants below; their string values are part of the
 * stored data format, so they must never change even if the Java constant is renamed.
 * NOTE(review): several values carry a historical "Field" suffix (e.g. deploymentSpecField
 * = "deploymentSpecField") — presumably an old mistake kept for wire compatibility; confirm
 * before "fixing".
 */
class ApplicationSerializer {

    // ---- Application-level field names ----
    private final String idField = "id";
    private final String createdAtField = "createdAt";
    private final String deploymentSpecField = "deploymentSpecField";
    private final String validationOverridesField = "validationOverrides";
    private final String deploymentsField = "deployments";
    private final String deploymentJobsField = "deploymentJobs";
    private final String deployingField = "deployingField";
    private final String pinnedField = "pinned";
    private final String outstandingChangeField = "outstandingChangeField";
    private final String ownershipIssueIdField = "ownershipIssueId";
    private final String ownerField = "confirmedOwner";
    private final String majorVersionField = "majorVersion";
    private final String writeQualityField = "writeQuality";
    private final String queryQualityField = "queryQuality";
    private final String pemDeployKeyField = "pemDeployKey";
    private final String assignedRotationsField = "assignedRotations";
    private final String rotationsField = "endpoints";
    private final String deprecatedRotationField = "rotation"; // legacy single-rotation format
    private final String rotationStatusField = "rotationStatus";

    // ---- Deployment-level field names ----
    private final String zoneField = "zone";
    private final String environmentField = "environment";
    private final String regionField = "region";
    private final String deployTimeField = "deployTime";
    private final String applicationBuildNumberField = "applicationBuildNumber";
    private final String applicationPackageRevisionField = "applicationPackageRevision";
    private final String sourceRevisionField = "sourceRevision";
    private final String repositoryField = "repositoryField";
    private final String branchField = "branchField";
    private final String commitField = "commitField";
    private final String authorEmailField = "authorEmailField";
    private final String compileVersionField = "compileVersion";
    private final String buildTimeField = "buildTime";
    private final String lastQueriedField = "lastQueried";
    private final String lastWrittenField = "lastWritten";
    private final String lastQueriesPerSecondField = "lastQueriesPerSecond";
    private final String lastWritesPerSecondField = "lastWritesPerSecond";

    // ---- DeploymentJobs / JobStatus field names ----
    private final String projectIdField = "projectId";
    private final String jobStatusField = "jobStatus";
    private final String issueIdField = "jiraIssueId";
    private final String builtInternallyField = "builtInternally";
    private final String jobTypeField = "jobType";
    private final String errorField = "jobError";
    private final String lastTriggeredField = "lastTriggered";
    private final String lastCompletedField = "lastCompleted";
    private final String firstFailingField = "firstFailing";
    private final String lastSuccessField = "lastSuccess";
    private final String pausedUntilField = "pausedUntil";

    // ---- JobRun field names ----
    private final String jobRunIdField = "id";
    private final String versionField = "version";
    private final String revisionField = "revision";
    private final String sourceVersionField = "sourceVersion";
    private final String sourceApplicationField = "sourceRevision";
    private final String reasonField = "reason";
    private final String atField = "at";

    // ---- ClusterInfo field names ----
    private final String clusterInfoField = "clusterInfo";
    private final String clusterInfoFlavorField = "flavor";
    private final String clusterInfoCostField = "cost";
    private final String clusterInfoCpuField = "flavorCpu";
    private final String clusterInfoMemField = "flavorMem";
    private final String clusterInfoDiskField = "flavorDisk";
    private final String clusterInfoTypeField = "clusterType";
    private final String clusterInfoHostnamesField = "hostnames";

    // ---- ClusterUtilization field names ----
    private final String clusterUtilsField = "clusterUtils";
    private final String clusterUtilsCpuField = "cpu";
    private final String clusterUtilsMemField = "mem";
    private final String clusterUtilsDiskField = "disk";
    private final String clusterUtilsDiskBusyField = "diskbusy";

    // ---- DeploymentMetrics field names ----
    private final String deploymentMetricsField = "metrics";
    private final String deploymentMetricsQPSField = "queriesPerSecond";
    private final String deploymentMetricsWPSField = "writesPerSecond";
    private final String deploymentMetricsDocsField = "documentCount";
    private final String deploymentMetricsQueryLatencyField = "queryLatencyMillis";
    private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis";
    private final String deploymentMetricsUpdateTime = "lastUpdated";
    private final String deploymentMetricsWarningsField = "warnings";

    // ------------------ Serialization ------------------

    /** Serializes the given application to a new Slime tree. Optional values are omitted when absent. */
    public Slime toSlime(Application application) {
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        root.setString(idField, application.id().serializedForm());
        root.setLong(createdAtField, application.createdAt().toEpochMilli());
        root.setString(deploymentSpecField, application.deploymentSpec().xmlForm());
        root.setString(validationOverridesField, application.validationOverrides().xmlForm());
        deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField));
        toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField));
        toSlime(application.change(), root, deployingField);
        toSlime(application.outstandingChange(), root, outstandingChangeField);
        application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value()));
        application.owner().ifPresent(owner -> root.setString(ownerField, owner.username()));
        application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion));
        root.setDouble(queryQualityField, application.metrics().queryServiceQuality());
        root.setDouble(writeQualityField, application.metrics().writeServiceQuality());
        application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey));
        // Rotations are written in three formats for backwards compatibility:
        // legacy single rotation, plain rotation id list, and full assigned-rotation objects.
        application.legacyRotation().ifPresent(rotation -> root.setString(deprecatedRotationField, rotation.asString()));
        rotationsToSlime(application.assignedRotations(), root, rotationsField);
        assignedRotationsToSlime(application.assignedRotations(), root, assignedRotationsField);
        toSlime(application.rotationStatus(), root.setArray(rotationStatusField));
        return slime;
    }

    /** Writes each deployment as one object in the given array. */
    private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) {
        for (Deployment deployment : deployments)
            deploymentToSlime(deployment, array.addObject());
    }

    /** Writes one deployment, including cluster info/utilization, metrics and activity timestamps. */
    private void deploymentToSlime(Deployment deployment, Cursor object) {
        zoneIdToSlime(deployment.zone(), object.setObject(zoneField));
        object.setString(versionField, deployment.version().toString());
        object.setLong(deployTimeField, deployment.at().toEpochMilli());
        toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField));
        clusterInfoToSlime(deployment.clusterInfo(), object);
        clusterUtilsToSlime(deployment.clusterUtils(), object);
        deploymentMetricsToSlime(deployment.metrics(), object);
        deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value));
    }

    /** Writes deployment metrics; the warnings object is only written when non-empty. */
    private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) {
        Cursor root = object.setObject(deploymentMetricsField);
        root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond());
        root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond());
        root.setDouble(deploymentMetricsDocsField, metrics.documentCount());
        root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis());
        root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli()));
        if (!metrics.warnings().isEmpty()) {
            Cursor warningsObject = root.setObject(deploymentMetricsWarningsField);
            metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count));
        }
    }

    /** Writes cluster info keyed by cluster id. */
    private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) {
        Cursor root = object.setObject(clusterInfoField);
        for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) {
            toSlime(entry.getValue(), root.setObject(entry.getKey().value()));
        }
    }

    /** Writes one cluster's flavor/cost/type and its hostname list. */
    private void toSlime(ClusterInfo info, Cursor object) {
        object.setString(clusterInfoFlavorField, info.getFlavor());
        object.setLong(clusterInfoCostField, info.getFlavorCost());
        object.setDouble(clusterInfoCpuField, info.getFlavorCPU());
        object.setDouble(clusterInfoMemField, info.getFlavorMem());
        object.setDouble(clusterInfoDiskField, info.getFlavorDisk());
        object.setString(clusterInfoTypeField, info.getClusterType().name());
        Cursor array = object.setArray(clusterInfoHostnamesField);
        for (String host : info.getHostnames()) {
            array.addString(host);
        }
    }

    /** Writes cluster utilization keyed by cluster id. */
    private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) {
        Cursor root = object.setObject(clusterUtilsField);
        for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) {
            toSlime(entry.getValue(), root.setObject(entry.getKey().value()));
        }
    }

    /** Writes one cluster's utilization figures. */
    private void toSlime(ClusterUtilization utils, Cursor object) {
        object.setDouble(clusterUtilsCpuField, utils.getCpu());
        object.setDouble(clusterUtilsMemField, utils.getMemory());
        object.setDouble(clusterUtilsDiskField, utils.getDisk());
        object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy());
    }

    /** Writes a zone id as its environment and region values. */
    private void zoneIdToSlime(ZoneId zone, Cursor object) {
        object.setString(environmentField, zone.environment().value());
        object.setString(regionField, zone.region().value());
    }

    /** Writes an application version; writes nothing when build number or source revision is unknown. */
    private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
        if (applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent()) {
            object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong());
            toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField));
            applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email));
            applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString()));
            applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli()));
        }
    }

    /** Writes a source revision's repository, branch and commit. */
    private void toSlime(SourceRevision sourceRevision, Cursor object) {
        object.setString(repositoryField, sourceRevision.repository());
        object.setString(branchField, sourceRevision.branch());
        object.setString(commitField, sourceRevision.commit());
    }

    /** Writes deployment jobs: project id, per-job status, issue id, and build origin flag. */
    private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) {
        deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId));
        jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField));
        deploymentJobs.issueId().ifPresent(jiraIssueId -> cursor.setString(issueIdField, jiraIssueId.value()));
        cursor.setBool(builtInternallyField, deploymentJobs.deployedInternally());
    }

    /** Writes each job status as one object in the given array. */
    private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) {
        for (JobStatus jobStatus : jobStatuses)
            toSlime(jobStatus, jobStatusArray.addObject());
    }

    /** Writes one job's type, optional error, and the various last/first run records. */
    private void toSlime(JobStatus jobStatus, Cursor object) {
        object.setString(jobTypeField, jobStatus.type().jobName());
        if (jobStatus.jobError().isPresent())
            object.setString(errorField, jobStatus.jobError().get().name());
        jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField));
        jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField));
        jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField));
        jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, object, firstFailingField));
        jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until));
    }

    /** Writes one job run under a new object named {@code jobRunObjectName}. */
    private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) {
        Cursor object = parent.setObject(jobRunObjectName);
        object.setLong(jobRunIdField, jobRun.id());
        object.setString(versionField, jobRun.platform().toString());
        toSlime(jobRun.application(), object.setObject(revisionField));
        jobRun.sourcePlatform().ifPresent(version -> object.setString(sourceVersionField, version.toString()));
        jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField)));
        object.setString(reasonField, jobRun.reason());
        object.setLong(atField, jobRun.at().toEpochMilli());
    }

    /** Writes a change under {@code fieldName}; writes nothing for an empty change. */
    private void toSlime(Change deploying, Cursor parentObject, String fieldName) {
        if (deploying.isEmpty()) return;
        Cursor object = parentObject.setObject(fieldName);
        if (deploying.platform().isPresent())
            object.setString(versionField, deploying.platform().get().toString());
        if (deploying.application().isPresent())
            toSlime(deploying.application().get(), object);
        if (deploying.isPinned())
            object.setBool(pinnedField, true);
    }

    /** Writes rotation status as an array of {hostname, status} objects. */
    private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) {
        rotationStatus.forEach((hostname, status) -> {
            Cursor object = array.addObject();
            object.setString("hostname", hostname.value());
            object.setString("status", status.name());
        });
    }

    /** Writes only the rotation ids of the given assignments, as a string array (legacy list format). */
    private void rotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) {
        final var rotationsArray = parent.setArray(fieldName);
        rotations.forEach(rot -> rotationsArray.addString(rot.rotationId().asString()));
    }

    // ------------------ Deserialization ------------------

    /** Reconstructs an application from a Slime tree produced by {@link #toSlime(Application)}. */
    public Application fromSlime(Slime slime) {
        Inspector root = slime.get();
        ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString());
        Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong());
        DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false);
        ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString());
        List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField));
        DeploymentJobs deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField));
        Change deploying = changeFromSlime(root.field(deployingField));
        Change outstandingChange = changeFromSlime(root.field(outstandingChangeField));
        Optional<IssueId> ownershipIssueId = optionalString(root.field(ownershipIssueIdField)).map(IssueId::from);
        Optional<User> owner = optionalString(root.field(ownerField)).map(User::from);
        OptionalInt majorVersion = optionalInteger(root.field(majorVersionField));
        ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(),
                                                            root.field(writeQualityField).asDouble());
        Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField));
        List<AssignedRotation> assignedRotations = assignedRotationsFromSlime(deploymentSpec, root);
        Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField));
        return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs,
                               deploying, outstandingChange, ownershipIssueId, owner, majorVersion, metrics,
                               pemDeployKey, assignedRotations, rotationStatus);
    }

    /** Reads the deployments array into a list. */
    private List<Deployment> deploymentsFromSlime(Inspector array) {
        List<Deployment> deployments = new ArrayList<>();
        array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item)));
        return deployments;
    }

    /** Reads one deployment object. */
    private Deployment deploymentFromSlime(Inspector deploymentObject) {
        return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)),
                              applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)),
                              Version.fromString(deploymentObject.field(versionField).asString()),
                              Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()),
                              clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)),
                              clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)),
                              deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)),
                              DeploymentActivity.create(optionalInstant(deploymentObject.field(lastQueriedField)),
                                                        optionalInstant(deploymentObject.field(lastWrittenField)),
                                                        optionalDouble(deploymentObject.field(lastQueriesPerSecondField)),
                                                        optionalDouble(deploymentObject.field(lastWritesPerSecondField))));
    }

    /** Reads deployment metrics; the update timestamp is optional. */
    private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) {
        Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ?
                Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) :
                Optional.empty();
        return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(),
                                     object.field(deploymentMetricsWPSField).asDouble(),
                                     object.field(deploymentMetricsDocsField).asDouble(),
                                     object.field(deploymentMetricsQueryLatencyField).asDouble(),
                                     object.field(deploymentMetricsWriteLatencyField).asDouble(),
                                     instant,
                                     deploymentWarningsFrom(object.field(deploymentMetricsWarningsField)));
    }

    /** Reads the warnings object into an unmodifiable warning-to-count map. */
    private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) {
        Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
        object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name),
                                                                        (int) value.asLong()));
        return Collections.unmodifiableMap(warnings);
    }

    /** Reads rotation status; returns an empty map when the field is missing. */
    private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) {
        if (!object.valid()) {
            return Collections.emptyMap();
        }
        Map<HostName, RotationStatus> rotationStatus = new TreeMap<>();
        object.traverse((ArrayTraverser) (idx, inspect) -> {
            HostName hostname = HostName.from(inspect.field("hostname").asString());
            RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString());
            rotationStatus.put(hostname, status);
        });
        return Collections.unmodifiableMap(rotationStatus);
    }

    /** Reads the cluster info object into a map keyed by cluster id. */
    private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) {
        Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>();
        object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value)));
        return map;
    }

    /** Reads the cluster utilization object into a map keyed by cluster id. */
    private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) {
        Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>();
        object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value)));
        return map;
    }

    /** Reads one cluster's utilization figures. */
    private ClusterUtilization clusterUtililzationFromSlime(Inspector object) {
        double cpu = object.field(clusterUtilsCpuField).asDouble();
        double mem = object.field(clusterUtilsMemField).asDouble();
        double disk = object.field(clusterUtilsDiskField).asDouble();
        double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble();
        return new ClusterUtilization(mem, cpu, disk, diskBusy);
    }

    /** Reads one cluster's info, including its hostname list. */
    private ClusterInfo clusterInfoFromSlime(Inspector inspector) {
        String flavor = inspector.field(clusterInfoFlavorField).asString();
        int cost = (int)inspector.field(clusterInfoCostField).asLong();
        String type = inspector.field(clusterInfoTypeField).asString();
        double flavorCpu = inspector.field(clusterInfoCpuField).asDouble();
        double flavorMem = inspector.field(clusterInfoMemField).asDouble();
        double flavorDisk = inspector.field(clusterInfoDiskField).asDouble();
        List<String> hostnames = new ArrayList<>();
        inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString()));
        return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames);
    }

    /** Reads a zone id from its environment and region fields. */
    private ZoneId zoneIdFromSlime(Inspector object) {
        return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString());
    }

    /**
     * Reads an application version. Returns {@code ApplicationVersion.unknown} when the object
     * is missing or lacks a build number/source revision; otherwise picks the richest factory
     * the present optional fields allow.
     */
    private ApplicationVersion applicationVersionFromSlime(Inspector object) {
        if ( ! object.valid()) return ApplicationVersion.unknown;
        OptionalLong applicationBuildNumber = optionalLong(object.field(applicationBuildNumberField));
        Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField));
        if ( ! sourceRevision.isPresent() || ! applicationBuildNumber.isPresent()) {
            return ApplicationVersion.unknown;
        }
        Optional<String> authorEmail = optionalString(object.field(authorEmailField));
        Optional<Version> compileVersion = optionalString(object.field(compileVersionField)).map(Version::fromString);
        Optional<Instant> buildTime = optionalInstant(object.field(buildTimeField));
        if ( ! authorEmail.isPresent())
            return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong());
        if ( ! compileVersion.isPresent() || ! buildTime.isPresent())
            return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get());
        return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(),
                                       compileVersion.get(), buildTime.get());
    }

    /** Reads a source revision; empty when the object is missing. */
    private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        return Optional.of(new SourceRevision(object.field(repositoryField).asString(),
                                              object.field(branchField).asString(),
                                              object.field(commitField).asString()));
    }

    /** Reads the deployment jobs object. */
    private DeploymentJobs deploymentJobsFromSlime(Inspector object) {
        OptionalLong projectId = optionalLong(object.field(projectIdField));
        List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField));
        Optional<IssueId> issueId = optionalString(object.field(issueIdField)).map(IssueId::from);
        boolean builtInternally = object.field(builtInternallyField).asBool();
        return new DeploymentJobs(projectId, jobStatusList, issueId, builtInternally);
    }

    /** Reads a change; an invalid object yields {@code Change.empty()}. */
    private Change changeFromSlime(Inspector object) {
        if ( ! object.valid()) return Change.empty();
        Inspector versionFieldValue = object.field(versionField);
        Change change = Change.empty();
        if (versionFieldValue.valid())
            change = Change.of(Version.fromString(versionFieldValue.asString()));
        if (object.field(applicationBuildNumberField).valid())
            change = change.with(applicationVersionFromSlime(object));
        if (object.field(pinnedField).asBool())
            change = change.withPin();
        return change;
    }

    /** Reads the job status array; entries with unknown job types are skipped. */
    private List<JobStatus> jobStatusListFromSlime(Inspector array) {
        List<JobStatus> jobStatusList = new ArrayList<>();
        array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add));
        return jobStatusList;
    }

    /** Reads one job status; empty when the job type is no longer recognized. */
    private Optional<JobStatus> jobStatusFromSlime(Inspector object) {
        Optional<JobType> jobType = JobType.fromOptionalJobName(object.field(jobTypeField).asString());
        if (! jobType.isPresent()) return Optional.empty();
        Optional<JobError> jobError = Optional.empty();
        if (object.field(errorField).valid())
            jobError = Optional.of(JobError.valueOf(object.field(errorField).asString()));
        return Optional.of(new JobStatus(jobType.get(), jobError,
                                         jobRunFromSlime(object.field(lastTriggeredField)),
                                         jobRunFromSlime(object.field(lastCompletedField)),
                                         jobRunFromSlime(object.field(firstFailingField)),
                                         jobRunFromSlime(object.field(lastSuccessField)),
                                         optionalLong(object.field(pausedUntilField))));
    }

    /** Reads one job run; empty when the object is missing. */
    private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(),
                                                new Version(object.field(versionField).asString()),
                                                applicationVersionFromSlime(object.field(revisionField)),
                                                optionalString(object.field(sourceVersionField)).map(Version::fromString),
                                                Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime),
                                                object.field(reasonField).asString(),
                                                Instant.ofEpochMilli(object.field(atField).asLong())));
    }

    /**
     * Reads assigned rotations from all three stored formats (legacy single rotation,
     * rotation id list, and assigned-rotation objects), de-duplicating via a LinkedHashSet
     * so first-seen entries win and order is preserved.
     * NOTE(review): the literals "container"/"endpoint"/"rotation" here should use the
     * same constants as the serializing side.
     */
    private List<AssignedRotation> assignedRotationsFromSlime(DeploymentSpec deploymentSpec, Inspector root) {
        final var assignedRotations = new LinkedHashSet<AssignedRotation>();
        // Legacy format: a single rotation id, mapped to the global service id cluster.
        final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField));
        if (legacyRotation.isPresent() && deploymentSpec.globalServiceId().isPresent()) {
            final var clusterId = new ClusterSpec.Id(deploymentSpec.globalServiceId().get());
            assignedRotations.add(new AssignedRotation(clusterId, EndpointId.default_(), legacyRotation.get()));
        }
        // List format: plain rotation ids, also mapped to the global service id cluster.
        final var rotations = rotationListFromSlime(root.field(rotationsField));
        for (var rotation : rotations) {
            if (deploymentSpec.globalServiceId().isPresent()) {
                final var clusterId = new ClusterSpec.Id(deploymentSpec.globalServiceId().get());
                assignedRotations.add(new AssignedRotation(clusterId, EndpointId.default_(), rotation));
            }
        }
        // Current format: full assigned-rotation objects.
        root.field(assignedRotationsField).traverse((ArrayTraverser) (idx, inspector) -> {
            final var clusterId = new ClusterSpec.Id(inspector.field("container").asString());
            final var endpointId = EndpointId.of(inspector.field("endpoint").asString());
            final var rotationId = new RotationId(inspector.field("rotation").asString());
            assignedRotations.add(new AssignedRotation(clusterId, endpointId, rotationId));
        });
        return List.copyOf(assignedRotations);
    }

    /** Reads a string array of rotation ids. */
    private List<RotationId> rotationListFromSlime(Inspector field) {
        final var rotations = new ArrayList<RotationId>();
        field.traverse((ArrayTraverser) (idx, inspector) -> {
            final var rotation = new RotationId(inspector.asString());
            rotations.add(rotation);
        });
        return rotations;
    }

    /** Reads the legacy single-rotation field, if present. */
    private Optional<RotationId> legacyRotationFromSlime(Inspector field) {
        return field.valid() ? optionalString(field).map(RotationId::new) : Optional.empty();
    }

    // ---- Optional-field helpers: empty when the field is absent ----

    private OptionalLong optionalLong(Inspector field) {
        return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty();
    }

    private OptionalInt optionalInteger(Inspector field) {
        return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty();
    }

    private OptionalDouble optionalDouble(Inspector field) {
        return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty();
    }

    private Optional<String> optionalString(Inspector field) {
        return SlimeUtils.optionalString(field);
    }

    private Optional<Instant> optionalInstant(Inspector field) {
        OptionalLong value = optionalLong(field);
        return value.isPresent() ? Optional.of(Instant.ofEpochMilli(value.getAsLong())) : Optional.empty();
    }

}
class ApplicationSerializer { private final String idField = "id"; private final String createdAtField = "createdAt"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String pinnedField = "pinned"; private final String outstandingChangeField = "outstandingChangeField"; private final String ownershipIssueIdField = "ownershipIssueId"; private final String ownerField = "confirmedOwner"; private final String majorVersionField = "majorVersion"; private final String writeQualityField = "writeQuality"; private final String queryQualityField = "queryQuality"; private final String pemDeployKeyField = "pemDeployKey"; private final String assignedRotationsField = "assignedRotations"; private final String assignedRotationEndpointField = "endpointId"; private final String assignedRotationClusterField = "clusterId"; private final String assignedRotationRotationField = "rotationId"; private final String rotationsField = "endpoints"; private final String deprecatedRotationField = "rotation"; private final String rotationStatusField = "rotationStatus"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationBuildNumberField = "applicationBuildNumber"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String authorEmailField = "authorEmailField"; private final String 
compileVersionField = "compileVersion"; private final String buildTimeField = "buildTime"; private final String lastQueriedField = "lastQueried"; private final String lastWrittenField = "lastWritten"; private final String lastQueriesPerSecondField = "lastQueriesPerSecond"; private final String lastWritesPerSecondField = "lastWritesPerSecond"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String issueIdField = "jiraIssueId"; private final String builtInternallyField = "builtInternally"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String pausedUntilField = "pausedUntil"; private final String jobRunIdField = "id"; private final String versionField = "version"; private final String revisionField = "revision"; private final String sourceVersionField = "sourceVersion"; private final String sourceApplicationField = "sourceRevision"; private final String reasonField = "reason"; private final String atField = "at"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField = "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = 
"diskbusy"; private final String deploymentMetricsField = "metrics"; private final String deploymentMetricsQPSField = "queriesPerSecond"; private final String deploymentMetricsWPSField = "writesPerSecond"; private final String deploymentMetricsDocsField = "documentCount"; private final String deploymentMetricsQueryLatencyField = "queryLatencyMillis"; private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis"; private final String deploymentMetricsUpdateTime = "lastUpdated"; private final String deploymentMetricsWarningsField = "warnings"; public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setLong(createdAtField, application.createdAt().toEpochMilli()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.change(), root, deployingField); toSlime(application.outstandingChange(), root, outstandingChangeField); application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value())); application.owner().ifPresent(owner -> root.setString(ownerField, owner.username())); application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion)); root.setDouble(queryQualityField, application.metrics().queryServiceQuality()); root.setDouble(writeQualityField, application.metrics().writeServiceQuality()); application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey)); application.legacyRotation().ifPresent(rotation -> root.setString(deprecatedRotationField, rotation.asString())); rotationsToSlime(application.assignedRotations(), root, 
rotationsField); assignedRotationsToSlime(application.assignedRotations(), root, assignedRotationsField); toSlime(application.rotationStatus(), root.setArray(rotationStatusField)); return slime; } private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, Cursor object) { zoneIdToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); deploymentMetricsToSlime(deployment.metrics(), object); deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value)); } private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) { Cursor root = object.setObject(deploymentMetricsField); root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond()); root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond()); root.setDouble(deploymentMetricsDocsField, metrics.documentCount()); root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis()); root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli())); if 
(!metrics.warnings().isEmpty()) { Cursor warningsObject = root.setObject(deploymentMetricsWarningsField); metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count)); } } private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneIdToSlime(ZoneId zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if (applicationVersion.buildNumber().isPresent() && 
applicationVersion.source().isPresent()) { object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong()); toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField)); applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString())); applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli())); } } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.issueId().ifPresent(jiraIssueId -> cursor.setString(issueIdField, jiraIssueId.value())); cursor.setBool(builtInternallyField, deploymentJobs.deployedInternally()); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().jobName()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField)); jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField)); jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField)); jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, object, firstFailingField)); 
jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until)); } private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) { Cursor object = parent.setObject(jobRunObjectName); object.setLong(jobRunIdField, jobRun.id()); object.setString(versionField, jobRun.platform().toString()); toSlime(jobRun.application(), object.setObject(revisionField)); jobRun.sourcePlatform().ifPresent(version -> object.setString(sourceVersionField, version.toString())); jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField))); object.setString(reasonField, jobRun.reason()); object.setLong(atField, jobRun.at().toEpochMilli()); } private void toSlime(Change deploying, Cursor parentObject, String fieldName) { if (deploying.isEmpty()) return; Cursor object = parentObject.setObject(fieldName); if (deploying.platform().isPresent()) object.setString(versionField, deploying.platform().get().toString()); if (deploying.application().isPresent()) toSlime(deploying.application().get(), object); if (deploying.isPinned()) object.setBool(pinnedField, true); } private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) { rotationStatus.forEach((hostname, status) -> { Cursor object = array.addObject(); object.setString("hostname", hostname.value()); object.setString("status", status.name()); }); } private void rotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) { final var rotationsArray = parent.setArray(fieldName); rotations.forEach(rot -> rotationsArray.addString(rot.rotationId().asString())); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false); 
ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField)); Change deploying = changeFromSlime(root.field(deployingField)); Change outstandingChange = changeFromSlime(root.field(outstandingChangeField)); Optional<IssueId> ownershipIssueId = optionalString(root.field(ownershipIssueIdField)).map(IssueId::from); Optional<User> owner = optionalString(root.field(ownerField)).map(User::from); OptionalInt majorVersion = optionalInteger(root.field(majorVersionField)); ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(), root.field(writeQualityField).asDouble()); Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField)); List<AssignedRotation> assignedRotations = assignedRotationsFromSlime(deploymentSpec, root); Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField)); return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, assignedRotations, rotationStatus); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)), applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), 
clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)), deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)), DeploymentActivity.create(optionalInstant(deploymentObject.field(lastQueriedField)), optionalInstant(deploymentObject.field(lastWrittenField)), optionalDouble(deploymentObject.field(lastQueriesPerSecondField)), optionalDouble(deploymentObject.field(lastWritesPerSecondField)))); } private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) { Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ? Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) : Optional.empty(); return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(), object.field(deploymentMetricsWPSField).asDouble(), object.field(deploymentMetricsDocsField).asDouble(), object.field(deploymentMetricsQueryLatencyField).asDouble(), object.field(deploymentMetricsWriteLatencyField).asDouble(), instant, deploymentWarningsFrom(object.field(deploymentMetricsWarningsField))); } private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) { Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name), (int) value.asLong())); return Collections.unmodifiableMap(warnings); } private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) { if (!object.valid()) { return Collections.emptyMap(); } Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); object.traverse((ArrayTraverser) (idx, inspect) -> { HostName hostname = HostName.from(inspect.field("hostname").asString()); RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString()); rotationStatus.put(hostname, status); }); return Collections.unmodifiableMap(rotationStatus); } private 
Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private ClusterInfo clusterInfoFromSlime(Inspector inspector) { String flavor = inspector.field(clusterInfoFlavorField).asString(); int cost = (int)inspector.field(clusterInfoCostField).asLong(); String type = inspector.field(clusterInfoTypeField).asString(); double flavorCpu = inspector.field(clusterInfoCpuField).asDouble(); double flavorMem = inspector.field(clusterInfoMemField).asDouble(); double flavorDisk = inspector.field(clusterInfoDiskField).asDouble(); List<String> hostnames = new ArrayList<>(); inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString())); return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames); } private ZoneId zoneIdFromSlime(Inspector object) { return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString()); } private ApplicationVersion applicationVersionFromSlime(Inspector object) { if ( ! 
object.valid()) return ApplicationVersion.unknown; OptionalLong applicationBuildNumber = optionalLong(object.field(applicationBuildNumberField)); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); if ( ! sourceRevision.isPresent() || ! applicationBuildNumber.isPresent()) { return ApplicationVersion.unknown; } Optional<String> authorEmail = optionalString(object.field(authorEmailField)); Optional<Version> compileVersion = optionalString(object.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = optionalInstant(object.field(buildTimeField)); if ( ! authorEmail.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong()); if ( ! compileVersion.isPresent() || ! buildTime.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get()); return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(), compileVersion.get(), buildTime.get()); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { OptionalLong projectId = optionalLong(object.field(projectIdField)); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<IssueId> issueId = optionalString(object.field(issueIdField)).map(IssueId::from); boolean builtInternally = object.field(builtInternallyField).asBool(); return new DeploymentJobs(projectId, jobStatusList, issueId, builtInternally); } private Change changeFromSlime(Inspector object) { if ( ! 
object.valid()) return Change.empty(); Inspector versionFieldValue = object.field(versionField); Change change = Change.empty(); if (versionFieldValue.valid()) change = Change.of(Version.fromString(versionFieldValue.asString())); if (object.field(applicationBuildNumberField).valid()) change = change.with(applicationVersionFromSlime(object)); if (object.field(pinnedField).asBool()) change = change.withPin(); return change; } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add)); return jobStatusList; } private Optional<JobStatus> jobStatusFromSlime(Inspector object) { Optional<JobType> jobType = JobType.fromOptionalJobName(object.field(jobTypeField).asString()); if (! jobType.isPresent()) return Optional.empty(); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return Optional.of(new JobStatus(jobType.get(), jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField)), optionalLong(object.field(pausedUntilField)))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(), new Version(object.field(versionField).asString()), applicationVersionFromSlime(object.field(revisionField)), optionalString(object.field(sourceVersionField)).map(Version::fromString), Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime), object.field(reasonField).asString(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private List<AssignedRotation> assignedRotationsFromSlime(DeploymentSpec deploymentSpec, Inspector root) { final var assignedRotations = new LinkedHashSet<AssignedRotation>(); final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField)); if (legacyRotation.isPresent() && deploymentSpec.globalServiceId().isPresent()) { final var clusterId = new ClusterSpec.Id(deploymentSpec.globalServiceId().get()); assignedRotations.add(new AssignedRotation(clusterId, EndpointId.default_(), legacyRotation.get())); } final var rotations = rotationListFromSlime(root.field(rotationsField)); for (var rotation : rotations) { if (deploymentSpec.globalServiceId().isPresent()) { final var clusterId = new ClusterSpec.Id(deploymentSpec.globalServiceId().get()); assignedRotations.add(new AssignedRotation(clusterId, EndpointId.default_(), rotation)); } } root.field(assignedRotationsField).traverse((ArrayTraverser) (idx, inspector) -> { final var clusterId = new ClusterSpec.Id(inspector.field(assignedRotationClusterField).asString()); final var endpointId = EndpointId.of(inspector.field(assignedRotationEndpointField).asString()); final var rotationId = new RotationId(inspector.field(assignedRotationRotationField).asString()); assignedRotations.add(new AssignedRotation(clusterId, endpointId, rotationId)); }); return List.copyOf(assignedRotations); } private List<RotationId> rotationListFromSlime(Inspector field) { final var rotations = new ArrayList<RotationId>(); 
field.traverse((ArrayTraverser) (idx, inspector) -> { final var rotation = new RotationId(inspector.asString()); rotations.add(rotation); }); return rotations; } private Optional<RotationId> legacyRotationFromSlime(Inspector field) { return field.valid() ? optionalString(field).map(RotationId::new) : Optional.empty(); } private OptionalLong optionalLong(Inspector field) { return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty(); } private OptionalInt optionalInteger(Inspector field) { return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty(); } private OptionalDouble optionalDouble(Inspector field) { return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } private Optional<Instant> optionalInstant(Inspector field) { OptionalLong value = optionalLong(field); return value.isPresent() ? Optional.of(Instant.ofEpochMilli(value.getAsLong())) : Optional.empty(); } }
Is there a way to automatically enforce code styles? I would rather IntelliJ (or some plugin) deal with alignment instead of me. Ideally I would just like to reformat the whole file before I commit.
/**
 * Returns a copy of this application, identical except for its assigned rotations,
 * which are replaced by the given list. The underlying lock is carried over.
 */
public LockedApplication with(List<AssignedRotation> assignedRotations) {
    return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides,
                                 deployments, deploymentJobs, change, outstandingChange,
                                 ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
                                 assignedRotations, rotationStatus);
}
deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
/**
 * Creates a new locked instance that differs from this one only in the list of
 * assigned rotations; every other field (including the held lock) is unchanged.
 */
public LockedApplication with(List<AssignedRotation> assignedRotations) {
    return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides,
                                 deployments, deploymentJobs, change, outstandingChange,
                                 ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
                                 assignedRotations, rotationStatus);
}
/**
 * An application under an exclusive lock: holding an instance is proof that the caller
 * holds the application lock while modifying it. Instances are immutable; every
 * {@code with*} method returns a new LockedApplication carrying the same lock.
 */
class LockedApplication {

    private final Lock lock;
    private final ApplicationId id;
    private final Instant createdAt;
    private final DeploymentSpec deploymentSpec;
    private final ValidationOverrides validationOverrides;
    private final Map<ZoneId, Deployment> deployments;
    private final DeploymentJobs deploymentJobs;
    private final Change change;
    private final Change outstandingChange;
    private final Optional<IssueId> ownershipIssueId;
    private final Optional<User> owner;
    private final OptionalInt majorVersion;
    private final ApplicationMetrics metrics;
    private final Optional<String> pemDeployKey;
    private final List<AssignedRotation> rotations;
    private final Map<HostName, RotationStatus> rotationStatus;

    /**
     * Used to create a locked application
     *
     * @param application The application to lock.
     * @param lock The lock for the application.
     */
    LockedApplication(Application application, Lock lock) {
        this(Objects.requireNonNull(lock, "lock cannot be null"),
             application.id(), application.createdAt(), application.deploymentSpec(),
             application.validationOverrides(), application.deployments(), application.deploymentJobs(),
             application.change(), application.outstandingChange(), application.ownershipIssueId(),
             application.owner(), application.majorVersion(), application.metrics(),
             application.pemDeployKey(), application.assignedRotations(), application.rotationStatus());
    }

    private LockedApplication(Lock lock, ApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec,
                              ValidationOverrides validationOverrides, Map<ZoneId, Deployment> deployments,
                              DeploymentJobs deploymentJobs, Change change, Change outstandingChange,
                              Optional<IssueId> ownershipIssueId, Optional<User> owner, OptionalInt majorVersion,
                              ApplicationMetrics metrics, Optional<String> pemDeployKey,
                              List<AssignedRotation> rotations, Map<HostName, RotationStatus> rotationStatus) {
        this.lock = lock;
        this.id = id;
        this.createdAt = createdAt;
        this.deploymentSpec = deploymentSpec;
        this.validationOverrides = validationOverrides;
        this.deployments = deployments;
        this.deploymentJobs = deploymentJobs;
        this.change = change;
        this.outstandingChange = outstandingChange;
        this.ownershipIssueId = ownershipIssueId;
        this.owner = owner;
        this.majorVersion = majorVersion;
        this.metrics = metrics;
        this.pemDeployKey = pemDeployKey;
        this.rotations = rotations;
        this.rotationStatus = rotationStatus;
    }

    /** Returns a read-only copy of this */
    public Application get() {
        return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs,
                               change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics,
                               pemDeployKey, rotations, rotationStatus);
    }

    public LockedApplication withBuiltInternally(boolean builtInternally) {
        return with(deploymentJobs.withBuiltInternally(builtInternally));
    }

    public LockedApplication withProjectId(OptionalLong projectId) {
        return with(deploymentJobs.withProjectId(projectId));
    }

    public LockedApplication withDeploymentIssueId(IssueId issueId) {
        return with(deploymentJobs.with(issueId));
    }

    public LockedApplication withJobPause(JobType jobType, OptionalLong pausedUntil) {
        return with(deploymentJobs.withPause(jobType, pausedUntil));
    }

    public LockedApplication withJobCompletion(long projectId, JobType jobType, JobStatus.JobRun completion,
                                               Optional<DeploymentJobs.JobError> jobError) {
        return with(deploymentJobs.withCompletion(projectId, jobType, completion, jobError));
    }

    public LockedApplication withJobTriggering(JobType jobType, JobStatus.JobRun job) {
        return with(deploymentJobs.withTriggering(jobType, job));
    }

    public LockedApplication withNewDeployment(ZoneId zone, ApplicationVersion applicationVersion, Version version,
                                               Instant instant, Map<DeploymentMetrics.Warning, Integer> warnings) {
        // Carry over cluster utilization/info, metrics and activity from any previous
        // deployment in this zone; only the warning counts are replaced.
        Deployment previousDeployment = deployments.getOrDefault(zone, new Deployment(zone, applicationVersion,
                                                                                      version, instant));
        Deployment newDeployment = new Deployment(zone, applicationVersion, version, instant,
                                                  previousDeployment.clusterUtils(),
                                                  previousDeployment.clusterInfo(),
                                                  previousDeployment.metrics().with(warnings),
                                                  previousDeployment.activity());
        return with(newDeployment);
    }

    public LockedApplication withClusterUtilization(ZoneId zone, Map<ClusterSpec.Id, ClusterUtilization> clusterUtilization) {
        Deployment deployment = deployments.get(zone);
        if (deployment == null) return this; // No deployment in this zone — nothing to update.
        return with(deployment.withClusterUtils(clusterUtilization));
    }

    public LockedApplication withClusterInfo(ZoneId zone, Map<ClusterSpec.Id, ClusterInfo> clusterInfo) {
        Deployment deployment = deployments.get(zone);
        if (deployment == null) return this; // No deployment in this zone — nothing to update.
        return with(deployment.withClusterInfo(clusterInfo));
    }

    public LockedApplication recordActivityAt(Instant instant, ZoneId zone) {
        Deployment deployment = deployments.get(zone);
        if (deployment == null) return this; // No deployment in this zone — nothing to record.
        return with(deployment.recordActivityAt(instant));
    }

    public LockedApplication with(ZoneId zone, DeploymentMetrics deploymentMetrics) {
        Deployment deployment = deployments.get(zone);
        if (deployment == null) return this; // No deployment in this zone — nothing to update.
        return with(deployment.withMetrics(deploymentMetrics));
    }

    public LockedApplication withoutDeploymentIn(ZoneId zone) {
        Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments);
        deployments.remove(zone);
        return with(deployments);
    }

    public LockedApplication withoutDeploymentJob(JobType jobType) {
        return with(deploymentJobs.without(jobType));
    }

    public LockedApplication with(DeploymentSpec deploymentSpec) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    public LockedApplication with(ValidationOverrides validationOverrides) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    public LockedApplication withChange(Change change) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    public LockedApplication withOutstandingChange(Change outstandingChange) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    public LockedApplication withOwnershipIssueId(IssueId issueId) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, Optional.ofNullable(issueId), owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    public LockedApplication withOwner(User owner) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId,
                                     Optional.ofNullable(owner), majorVersion, metrics, pemDeployKey,
                                     rotations, rotationStatus);
    }

    /** Set a major version for this, or set to null to remove any major version override */
    public LockedApplication withMajorVersion(Integer majorVersion) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion == null ? OptionalInt.empty() : OptionalInt.of(majorVersion),
                                     metrics, pemDeployKey, rotations, rotationStatus);
    }

    public LockedApplication with(MetricsService.ApplicationMetrics metrics) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    public LockedApplication withPemDeployKey(String pemDeployKey) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, Optional.ofNullable(pemDeployKey), rotations,
                                     rotationStatus);
    }

    public LockedApplication withRotationStatus(Map<HostName, RotationStatus> rotationStatus) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    /** Don't expose non-leaf sub-objects. */
    private LockedApplication with(Deployment deployment) {
        Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments);
        deployments.put(deployment.zone(), deployment);
        return with(deployments);
    }

    private LockedApplication with(Map<ZoneId, Deployment> deployments) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    /**
     * Returns a copy with the given deployment-jobs value; everything else is unchanged.
     * Consolidates the 16-argument constructor call previously duplicated in every
     * deployment-jobs mutator above.
     */
    private LockedApplication with(DeploymentJobs deploymentJobs) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    @Override
    public String toString() {
        return "application '" + id + "'";
    }

}
/**
 * An application under an exclusive lock: holding an instance is proof that the caller
 * holds the application lock while modifying it. All fields are final; every "with*"
 * method returns a new LockedApplication carrying the same lock and all other state.
 * get() produces a read-only Application copy of the current state.
 */
class LockedApplication { private final Lock lock; private final ApplicationId id; private final Instant createdAt; private final DeploymentSpec deploymentSpec; private final ValidationOverrides validationOverrides; private final Map<ZoneId, Deployment> deployments; private final DeploymentJobs deploymentJobs; private final Change change; private final Change outstandingChange; private final Optional<IssueId> ownershipIssueId; private final Optional<User> owner; private final OptionalInt majorVersion; private final ApplicationMetrics metrics; private final Optional<String> pemDeployKey; private final List<AssignedRotation> rotations; private final Map<HostName, RotationStatus> rotationStatus; /** * Used to create a locked application * * @param application The application to lock. * @param lock The lock for the application. */ LockedApplication(Application application, Lock lock) { this(Objects.requireNonNull(lock, "lock cannot be null"), application.id(), application.createdAt(), application.deploymentSpec(), application.validationOverrides(), application.deployments(), application.deploymentJobs(), application.change(), application.outstandingChange(), application.ownershipIssueId(), application.owner(), application.majorVersion(), application.metrics(), application.pemDeployKey(), application.assignedRotations(), application.rotationStatus()); } private LockedApplication(Lock lock, ApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides, Map<ZoneId, Deployment> deployments, DeploymentJobs deploymentJobs, Change change, Change outstandingChange, Optional<IssueId> ownershipIssueId, Optional<User> owner, OptionalInt majorVersion, ApplicationMetrics metrics, Optional<String> pemDeployKey, List<AssignedRotation> rotations, Map<HostName, RotationStatus> rotationStatus) { this.lock = lock; this.id = id; this.createdAt = createdAt; this.deploymentSpec = deploymentSpec; this.validationOverrides = validationOverrides;
// remaining constructor field assignments, then the read-only copy and the "with*" mutators
this.deployments = deployments; this.deploymentJobs = deploymentJobs; this.change = change; this.outstandingChange = outstandingChange; this.ownershipIssueId = ownershipIssueId; this.owner = owner; this.majorVersion = majorVersion; this.metrics = metrics; this.pemDeployKey = pemDeployKey; this.rotations = rotations; this.rotationStatus = rotationStatus; } /** Returns a read-only copy of this */ public Application get() { return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withBuiltInternally(boolean builtInternally) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withBuiltInternally(builtInternally), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withProjectId(OptionalLong projectId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withProjectId(projectId), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withDeploymentIssueId(IssueId issueId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.with(issueId), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withJobPause(JobType jobType, OptionalLong pausedUntil) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withPause(jobType, pausedUntil), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public
LockedApplication withJobCompletion(long projectId, JobType jobType, JobStatus.JobRun completion, Optional<DeploymentJobs.JobError> jobError) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withCompletion(projectId, jobType, completion, jobError), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withJobTriggering(JobType jobType, JobStatus.JobRun job) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withTriggering(jobType, job), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withNewDeployment(ZoneId zone, ApplicationVersion applicationVersion, Version version, Instant instant, Map<DeploymentMetrics.Warning, Integer> warnings) { Deployment previousDeployment = deployments.getOrDefault(zone, new Deployment(zone, applicationVersion, version, instant)); Deployment newDeployment = new Deployment(zone, applicationVersion, version, instant, previousDeployment.clusterUtils(), previousDeployment.clusterInfo(), previousDeployment.metrics().with(warnings), previousDeployment.activity()); return with(newDeployment); } public LockedApplication withClusterUtilization(ZoneId zone, Map<ClusterSpec.Id, ClusterUtilization> clusterUtilization) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withClusterUtils(clusterUtilization)); } public LockedApplication withClusterInfo(ZoneId zone, Map<ClusterSpec.Id, ClusterInfo> clusterInfo) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withClusterInfo(clusterInfo)); } public LockedApplication recordActivityAt(Instant instant, ZoneId zone) { Deployment deployment = deployments.get(zone); if (deployment
== null) return this; return with(deployment.recordActivityAt(instant)); } public LockedApplication with(ZoneId zone, DeploymentMetrics deploymentMetrics) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withMetrics(deploymentMetrics)); } public LockedApplication withoutDeploymentIn(ZoneId zone) { Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments); deployments.remove(zone); return with(deployments); } public LockedApplication withoutDeploymentJob(JobType jobType) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.without(jobType), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(DeploymentSpec deploymentSpec) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(ValidationOverrides validationOverrides) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withChange(Change change) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withOutstandingChange(Change outstandingChange) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations,
rotationStatus); } public LockedApplication withOwnershipIssueId(IssueId issueId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, Optional.ofNullable(issueId), owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withOwner(User owner) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, Optional.ofNullable(owner), majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } /** Set a major version for this, or set to null to remove any major version override */ public LockedApplication withMajorVersion(Integer majorVersion) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion == null ? OptionalInt.empty() : OptionalInt.of(majorVersion), metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(MetricsService.ApplicationMetrics metrics) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withPemDeployKey(String pemDeployKey) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, Optional.ofNullable(pemDeployKey), rotations, rotationStatus); } public LockedApplication withRotationStatus(Map<HostName, RotationStatus> rotationStatus) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
metrics, pemDeployKey, rotations, rotationStatus); } /** Don't expose non-leaf sub-objects. */ private LockedApplication with(Deployment deployment) { Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments); deployments.put(deployment.zone(), deployment); return with(deployments); } private LockedApplication with(Map<ZoneId, Deployment> deployments) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } @Override public String toString() { return "application '" + id + "'"; } }
Agreed, and done. I originally avoided doing it this way only because it was not how the other values in this file were handled.
/**
 * Writes the given assigned rotations as an array named fieldName under parent.
 *
 * Uses the shared assignedRotation*Field constants instead of repeating the string
 * literals, so this writer cannot drift out of sync with the reader
 * (assignedRotationsFromSlime), which reads the same constants.
 */
private void assignedRotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) {
    final var rotationsArray = parent.setArray(fieldName);
    for (var rotation : rotations) {
        final var object = rotationsArray.addObject();
        object.setString(assignedRotationEndpointField, rotation.endpointId().id());
        object.setString(assignedRotationRotationField, rotation.rotationId().asString());
        object.setString(assignedRotationClusterField, rotation.clusterId().value());
    }
}
object.setString("endpoint", rotation.endpointId().id());
/**
 * Writes the given assigned rotations as an array named fieldName under parent.
 * One object per rotation, keyed by the shared assignedRotation*Field constants,
 * matching the field names the deserializer reads.
 */
private void assignedRotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) { final var rotationsArray = parent.setArray(fieldName); for (var rotation : rotations) { final var object = rotationsArray.addObject(); object.setString(assignedRotationEndpointField, rotation.endpointId().id()); object.setString(assignedRotationRotationField, rotation.rotationId().asString()); object.setString(assignedRotationClusterField, rotation.clusterId().value()); } }
class ApplicationSerializer { private final String idField = "id"; private final String createdAtField = "createdAt"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String pinnedField = "pinned"; private final String outstandingChangeField = "outstandingChangeField"; private final String ownershipIssueIdField = "ownershipIssueId"; private final String ownerField = "confirmedOwner"; private final String majorVersionField = "majorVersion"; private final String writeQualityField = "writeQuality"; private final String queryQualityField = "queryQuality"; private final String pemDeployKeyField = "pemDeployKey"; private final String assignedRotationsField = "assignedRotations"; private final String rotationsField = "endpoints"; private final String deprecatedRotationField = "rotation"; private final String rotationStatusField = "rotationStatus"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationBuildNumberField = "applicationBuildNumber"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String authorEmailField = "authorEmailField"; private final String compileVersionField = "compileVersion"; private final String buildTimeField = "buildTime"; private final String lastQueriedField = "lastQueried"; private final String lastWrittenField = "lastWritten"; private final 
String lastQueriesPerSecondField = "lastQueriesPerSecond"; private final String lastWritesPerSecondField = "lastWritesPerSecond"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String issueIdField = "jiraIssueId"; private final String builtInternallyField = "builtInternally"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String pausedUntilField = "pausedUntil"; private final String jobRunIdField = "id"; private final String versionField = "version"; private final String revisionField = "revision"; private final String sourceVersionField = "sourceVersion"; private final String sourceApplicationField = "sourceRevision"; private final String reasonField = "reason"; private final String atField = "at"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField = "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = "diskbusy"; private final String deploymentMetricsField = "metrics"; private final String deploymentMetricsQPSField = "queriesPerSecond"; private final String deploymentMetricsWPSField = "writesPerSecond"; private 
final String deploymentMetricsDocsField = "documentCount"; private final String deploymentMetricsQueryLatencyField = "queryLatencyMillis"; private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis"; private final String deploymentMetricsUpdateTime = "lastUpdated"; private final String deploymentMetricsWarningsField = "warnings"; public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setLong(createdAtField, application.createdAt().toEpochMilli()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.change(), root, deployingField); toSlime(application.outstandingChange(), root, outstandingChangeField); application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value())); application.owner().ifPresent(owner -> root.setString(ownerField, owner.username())); application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion)); root.setDouble(queryQualityField, application.metrics().queryServiceQuality()); root.setDouble(writeQualityField, application.metrics().writeServiceQuality()); application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey)); application.legacyRotation().ifPresent(rotation -> root.setString(deprecatedRotationField, rotation.asString())); rotationsToSlime(application.assignedRotations(), root, rotationsField); assignedRotationsToSlime(application.assignedRotations(), root, assignedRotationsField); toSlime(application.rotationStatus(), root.setArray(rotationStatusField)); return slime; } private void 
deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, Cursor object) { zoneIdToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); deploymentMetricsToSlime(deployment.metrics(), object); deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value)); } private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) { Cursor root = object.setObject(deploymentMetricsField); root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond()); root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond()); root.setDouble(deploymentMetricsDocsField, metrics.documentCount()); root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis()); root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli())); if (!metrics.warnings().isEmpty()) { Cursor warningsObject = root.setObject(deploymentMetricsWarningsField); metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count)); } } private void 
clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneIdToSlime(ZoneId zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if (applicationVersion.buildNumber().isPresent() && applicationVersion.source().isPresent()) { object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong()); toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField)); 
applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString())); applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli())); } } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.issueId().ifPresent(jiraIssueId -> cursor.setString(issueIdField, jiraIssueId.value())); cursor.setBool(builtInternallyField, deploymentJobs.deployedInternally()); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().jobName()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField)); jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField)); jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField)); jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, object, firstFailingField)); jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until)); } private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) { Cursor object = parent.setObject(jobRunObjectName); 
object.setLong(jobRunIdField, jobRun.id()); object.setString(versionField, jobRun.platform().toString()); toSlime(jobRun.application(), object.setObject(revisionField)); jobRun.sourcePlatform().ifPresent(version -> object.setString(sourceVersionField, version.toString())); jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField))); object.setString(reasonField, jobRun.reason()); object.setLong(atField, jobRun.at().toEpochMilli()); } private void toSlime(Change deploying, Cursor parentObject, String fieldName) { if (deploying.isEmpty()) return; Cursor object = parentObject.setObject(fieldName); if (deploying.platform().isPresent()) object.setString(versionField, deploying.platform().get().toString()); if (deploying.application().isPresent()) toSlime(deploying.application().get(), object); if (deploying.isPinned()) object.setBool(pinnedField, true); } private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) { rotationStatus.forEach((hostname, status) -> { Cursor object = array.addObject(); object.setString("hostname", hostname.value()); object.setString("status", status.name()); }); } private void rotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) { final var rotationsArray = parent.setArray(fieldName); rotations.forEach(rot -> rotationsArray.addString(rot.rotationId().asString())); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false); ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs deploymentJobs = 
deploymentJobsFromSlime(root.field(deploymentJobsField)); Change deploying = changeFromSlime(root.field(deployingField)); Change outstandingChange = changeFromSlime(root.field(outstandingChangeField)); Optional<IssueId> ownershipIssueId = optionalString(root.field(ownershipIssueIdField)).map(IssueId::from); Optional<User> owner = optionalString(root.field(ownerField)).map(User::from); OptionalInt majorVersion = optionalInteger(root.field(majorVersionField)); ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(), root.field(writeQualityField).asDouble()); Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField)); List<AssignedRotation> assignedRotations = assignedRotationsFromSlime(deploymentSpec, root); Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField)); return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, assignedRotations, rotationStatus); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)), applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)), deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)), 
DeploymentActivity.create(optionalInstant(deploymentObject.field(lastQueriedField)), optionalInstant(deploymentObject.field(lastWrittenField)), optionalDouble(deploymentObject.field(lastQueriesPerSecondField)), optionalDouble(deploymentObject.field(lastWritesPerSecondField)))); } private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) { Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ? Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) : Optional.empty(); return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(), object.field(deploymentMetricsWPSField).asDouble(), object.field(deploymentMetricsDocsField).asDouble(), object.field(deploymentMetricsQueryLatencyField).asDouble(), object.field(deploymentMetricsWriteLatencyField).asDouble(), instant, deploymentWarningsFrom(object.field(deploymentMetricsWarningsField))); } private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) { Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name), (int) value.asLong())); return Collections.unmodifiableMap(warnings); } private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) { if (!object.valid()) { return Collections.emptyMap(); } Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); object.traverse((ArrayTraverser) (idx, inspect) -> { HostName hostname = HostName.from(inspect.field("hostname").asString()); RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString()); rotationStatus.put(hostname, status); }); return Collections.unmodifiableMap(rotationStatus); } private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), 
clusterInfoFromSlime(value))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private ClusterInfo clusterInfoFromSlime(Inspector inspector) { String flavor = inspector.field(clusterInfoFlavorField).asString(); int cost = (int)inspector.field(clusterInfoCostField).asLong(); String type = inspector.field(clusterInfoTypeField).asString(); double flavorCpu = inspector.field(clusterInfoCpuField).asDouble(); double flavorMem = inspector.field(clusterInfoMemField).asDouble(); double flavorDisk = inspector.field(clusterInfoDiskField).asDouble(); List<String> hostnames = new ArrayList<>(); inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString())); return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames); } private ZoneId zoneIdFromSlime(Inspector object) { return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString()); } private ApplicationVersion applicationVersionFromSlime(Inspector object) { if ( ! object.valid()) return ApplicationVersion.unknown; OptionalLong applicationBuildNumber = optionalLong(object.field(applicationBuildNumberField)); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); if ( ! 
sourceRevision.isPresent() || ! applicationBuildNumber.isPresent()) { return ApplicationVersion.unknown; } Optional<String> authorEmail = optionalString(object.field(authorEmailField)); Optional<Version> compileVersion = optionalString(object.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = optionalInstant(object.field(buildTimeField)); if ( ! authorEmail.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong()); if ( ! compileVersion.isPresent() || ! buildTime.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get()); return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(), compileVersion.get(), buildTime.get()); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { OptionalLong projectId = optionalLong(object.field(projectIdField)); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<IssueId> issueId = optionalString(object.field(issueIdField)).map(IssueId::from); boolean builtInternally = object.field(builtInternallyField).asBool(); return new DeploymentJobs(projectId, jobStatusList, issueId, builtInternally); } private Change changeFromSlime(Inspector object) { if ( ! 
object.valid()) return Change.empty(); Inspector versionFieldValue = object.field(versionField); Change change = Change.empty(); if (versionFieldValue.valid()) change = Change.of(Version.fromString(versionFieldValue.asString())); if (object.field(applicationBuildNumberField).valid()) change = change.with(applicationVersionFromSlime(object)); if (object.field(pinnedField).asBool()) change = change.withPin(); return change; } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add)); return jobStatusList; } private Optional<JobStatus> jobStatusFromSlime(Inspector object) { Optional<JobType> jobType = JobType.fromOptionalJobName(object.field(jobTypeField).asString()); if (! jobType.isPresent()) return Optional.empty(); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return Optional.of(new JobStatus(jobType.get(), jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField)), optionalLong(object.field(pausedUntilField)))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(), new Version(object.field(versionField).asString()), applicationVersionFromSlime(object.field(revisionField)), optionalString(object.field(sourceVersionField)).map(Version::fromString), Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime), object.field(reasonField).asString(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private List<AssignedRotation> assignedRotationsFromSlime(DeploymentSpec deploymentSpec, Inspector root) { final var assignedRotations = new LinkedHashSet<AssignedRotation>(); final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField)); if (legacyRotation.isPresent() && deploymentSpec.globalServiceId().isPresent()) { final var clusterId = new ClusterSpec.Id(deploymentSpec.globalServiceId().get()); assignedRotations.add(new AssignedRotation(clusterId, EndpointId.default_(), legacyRotation.get())); } final var rotations = rotationListFromSlime(root.field(rotationsField)); for (var rotation : rotations) { if (deploymentSpec.globalServiceId().isPresent()) { final var clusterId = new ClusterSpec.Id(deploymentSpec.globalServiceId().get()); assignedRotations.add(new AssignedRotation(clusterId, EndpointId.default_(), rotation)); } } root.field(assignedRotationsField).traverse((ArrayTraverser) (idx, inspector) -> { final var clusterId = new ClusterSpec.Id(inspector.field("container").asString()); final var endpointId = EndpointId.of(inspector.field("endpoint").asString()); final var rotationId = new RotationId(inspector.field("rotation").asString()); assignedRotations.add(new AssignedRotation(clusterId, endpointId, rotationId)); }); return List.copyOf(assignedRotations); } private List<RotationId> rotationListFromSlime(Inspector field) { final var rotations = new ArrayList<RotationId>(); field.traverse((ArrayTraverser) (idx, inspector) -> { 
final var rotation = new RotationId(inspector.asString()); rotations.add(rotation); }); return rotations; } private Optional<RotationId> legacyRotationFromSlime(Inspector field) { return field.valid() ? optionalString(field).map(RotationId::new) : Optional.empty(); } private OptionalLong optionalLong(Inspector field) { return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty(); } private OptionalInt optionalInteger(Inspector field) { return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty(); } private OptionalDouble optionalDouble(Inspector field) { return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } private Optional<Instant> optionalInstant(Inspector field) { OptionalLong value = optionalLong(field); return value.isPresent() ? Optional.of(Instant.ofEpochMilli(value.getAsLong())) : Optional.empty(); } }
class ApplicationSerializer { private final String idField = "id"; private final String createdAtField = "createdAt"; private final String deploymentSpecField = "deploymentSpecField"; private final String validationOverridesField = "validationOverrides"; private final String deploymentsField = "deployments"; private final String deploymentJobsField = "deploymentJobs"; private final String deployingField = "deployingField"; private final String pinnedField = "pinned"; private final String outstandingChangeField = "outstandingChangeField"; private final String ownershipIssueIdField = "ownershipIssueId"; private final String ownerField = "confirmedOwner"; private final String majorVersionField = "majorVersion"; private final String writeQualityField = "writeQuality"; private final String queryQualityField = "queryQuality"; private final String pemDeployKeyField = "pemDeployKey"; private final String assignedRotationsField = "assignedRotations"; private final String assignedRotationEndpointField = "endpointId"; private final String assignedRotationClusterField = "clusterId"; private final String assignedRotationRotationField = "rotationId"; private final String rotationsField = "endpoints"; private final String deprecatedRotationField = "rotation"; private final String rotationStatusField = "rotationStatus"; private final String zoneField = "zone"; private final String environmentField = "environment"; private final String regionField = "region"; private final String deployTimeField = "deployTime"; private final String applicationBuildNumberField = "applicationBuildNumber"; private final String applicationPackageRevisionField = "applicationPackageRevision"; private final String sourceRevisionField = "sourceRevision"; private final String repositoryField = "repositoryField"; private final String branchField = "branchField"; private final String commitField = "commitField"; private final String authorEmailField = "authorEmailField"; private final String 
compileVersionField = "compileVersion"; private final String buildTimeField = "buildTime"; private final String lastQueriedField = "lastQueried"; private final String lastWrittenField = "lastWritten"; private final String lastQueriesPerSecondField = "lastQueriesPerSecond"; private final String lastWritesPerSecondField = "lastWritesPerSecond"; private final String projectIdField = "projectId"; private final String jobStatusField = "jobStatus"; private final String issueIdField = "jiraIssueId"; private final String builtInternallyField = "builtInternally"; private final String jobTypeField = "jobType"; private final String errorField = "jobError"; private final String lastTriggeredField = "lastTriggered"; private final String lastCompletedField = "lastCompleted"; private final String firstFailingField = "firstFailing"; private final String lastSuccessField = "lastSuccess"; private final String pausedUntilField = "pausedUntil"; private final String jobRunIdField = "id"; private final String versionField = "version"; private final String revisionField = "revision"; private final String sourceVersionField = "sourceVersion"; private final String sourceApplicationField = "sourceRevision"; private final String reasonField = "reason"; private final String atField = "at"; private final String clusterInfoField = "clusterInfo"; private final String clusterInfoFlavorField = "flavor"; private final String clusterInfoCostField = "cost"; private final String clusterInfoCpuField = "flavorCpu"; private final String clusterInfoMemField = "flavorMem"; private final String clusterInfoDiskField = "flavorDisk"; private final String clusterInfoTypeField = "clusterType"; private final String clusterInfoHostnamesField = "hostnames"; private final String clusterUtilsField = "clusterUtils"; private final String clusterUtilsCpuField = "cpu"; private final String clusterUtilsMemField = "mem"; private final String clusterUtilsDiskField = "disk"; private final String clusterUtilsDiskBusyField = 
"diskbusy"; private final String deploymentMetricsField = "metrics"; private final String deploymentMetricsQPSField = "queriesPerSecond"; private final String deploymentMetricsWPSField = "writesPerSecond"; private final String deploymentMetricsDocsField = "documentCount"; private final String deploymentMetricsQueryLatencyField = "queryLatencyMillis"; private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis"; private final String deploymentMetricsUpdateTime = "lastUpdated"; private final String deploymentMetricsWarningsField = "warnings"; public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serializedForm()); root.setLong(createdAtField, application.createdAt().toEpochMilli()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField)); toSlime(application.deploymentJobs(), root.setObject(deploymentJobsField)); toSlime(application.change(), root, deployingField); toSlime(application.outstandingChange(), root, outstandingChangeField); application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value())); application.owner().ifPresent(owner -> root.setString(ownerField, owner.username())); application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion)); root.setDouble(queryQualityField, application.metrics().queryServiceQuality()); root.setDouble(writeQualityField, application.metrics().writeServiceQuality()); application.pemDeployKey().ifPresent(pemDeployKey -> root.setString(pemDeployKeyField, pemDeployKey)); application.legacyRotation().ifPresent(rotation -> root.setString(deprecatedRotationField, rotation.asString())); rotationsToSlime(application.assignedRotations(), root, 
rotationsField); assignedRotationsToSlime(application.assignedRotations(), root, assignedRotationsField); toSlime(application.rotationStatus(), root.setArray(rotationStatusField)); return slime; } private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, Cursor object) { zoneIdToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField)); clusterInfoToSlime(deployment.clusterInfo(), object); clusterUtilsToSlime(deployment.clusterUtils(), object); deploymentMetricsToSlime(deployment.metrics(), object); deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value)); } private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) { Cursor root = object.setObject(deploymentMetricsField); root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond()); root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond()); root.setDouble(deploymentMetricsDocsField, metrics.documentCount()); root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis()); root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli())); if 
(!metrics.warnings().isEmpty()) { Cursor warningsObject = root.setObject(deploymentMetricsWarningsField); metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count)); } } private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) { Cursor root = object.setObject(clusterInfoField); for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterInfo info, Cursor object) { object.setString(clusterInfoFlavorField, info.getFlavor()); object.setLong(clusterInfoCostField, info.getFlavorCost()); object.setDouble(clusterInfoCpuField, info.getFlavorCPU()); object.setDouble(clusterInfoMemField, info.getFlavorMem()); object.setDouble(clusterInfoDiskField, info.getFlavorDisk()); object.setString(clusterInfoTypeField, info.getClusterType().name()); Cursor array = object.setArray(clusterInfoHostnamesField); for (String host : info.getHostnames()) { array.addString(host); } } private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) { Cursor root = object.setObject(clusterUtilsField); for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) { toSlime(entry.getValue(), root.setObject(entry.getKey().value())); } } private void toSlime(ClusterUtilization utils, Cursor object) { object.setDouble(clusterUtilsCpuField, utils.getCpu()); object.setDouble(clusterUtilsMemField, utils.getMemory()); object.setDouble(clusterUtilsDiskField, utils.getDisk()); object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy()); } private void zoneIdToSlime(ZoneId zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { if (applicationVersion.buildNumber().isPresent() && 
applicationVersion.source().isPresent()) { object.setLong(applicationBuildNumberField, applicationVersion.buildNumber().getAsLong()); toSlime(applicationVersion.source().get(), object.setObject(sourceRevisionField)); applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString())); applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli())); } } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) { deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId)); jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField)); deploymentJobs.issueId().ifPresent(jiraIssueId -> cursor.setString(issueIdField, jiraIssueId.value())); cursor.setBool(builtInternallyField, deploymentJobs.deployedInternally()); } private void jobStatusToSlime(Collection<JobStatus> jobStatuses, Cursor jobStatusArray) { for (JobStatus jobStatus : jobStatuses) toSlime(jobStatus, jobStatusArray.addObject()); } private void toSlime(JobStatus jobStatus, Cursor object) { object.setString(jobTypeField, jobStatus.type().jobName()); if (jobStatus.jobError().isPresent()) object.setString(errorField, jobStatus.jobError().get().name()); jobStatus.lastTriggered().ifPresent(run -> jobRunToSlime(run, object, lastTriggeredField)); jobStatus.lastCompleted().ifPresent(run -> jobRunToSlime(run, object, lastCompletedField)); jobStatus.lastSuccess().ifPresent(run -> jobRunToSlime(run, object, lastSuccessField)); jobStatus.firstFailing().ifPresent(run -> jobRunToSlime(run, object, firstFailingField)); 
jobStatus.pausedUntil().ifPresent(until -> object.setLong(pausedUntilField, until)); } private void jobRunToSlime(JobStatus.JobRun jobRun, Cursor parent, String jobRunObjectName) { Cursor object = parent.setObject(jobRunObjectName); object.setLong(jobRunIdField, jobRun.id()); object.setString(versionField, jobRun.platform().toString()); toSlime(jobRun.application(), object.setObject(revisionField)); jobRun.sourcePlatform().ifPresent(version -> object.setString(sourceVersionField, version.toString())); jobRun.sourceApplication().ifPresent(version -> toSlime(version, object.setObject(sourceApplicationField))); object.setString(reasonField, jobRun.reason()); object.setLong(atField, jobRun.at().toEpochMilli()); } private void toSlime(Change deploying, Cursor parentObject, String fieldName) { if (deploying.isEmpty()) return; Cursor object = parentObject.setObject(fieldName); if (deploying.platform().isPresent()) object.setString(versionField, deploying.platform().get().toString()); if (deploying.application().isPresent()) toSlime(deploying.application().get(), object); if (deploying.isPinned()) object.setBool(pinnedField, true); } private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) { rotationStatus.forEach((hostname, status) -> { Cursor object = array.addObject(); object.setString("hostname", hostname.value()); object.setString("status", status.name()); }); } private void rotationsToSlime(List<AssignedRotation> rotations, Cursor parent, String fieldName) { final var rotationsArray = parent.setArray(fieldName); rotations.forEach(rot -> rotationsArray.addString(rot.rotationId().asString())); } public Application fromSlime(Slime slime) { Inspector root = slime.get(); ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString()); Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong()); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false); 
ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField)); DeploymentJobs deploymentJobs = deploymentJobsFromSlime(root.field(deploymentJobsField)); Change deploying = changeFromSlime(root.field(deployingField)); Change outstandingChange = changeFromSlime(root.field(outstandingChangeField)); Optional<IssueId> ownershipIssueId = optionalString(root.field(ownershipIssueIdField)).map(IssueId::from); Optional<User> owner = optionalString(root.field(ownerField)).map(User::from); OptionalInt majorVersion = optionalInteger(root.field(majorVersionField)); ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(), root.field(writeQualityField).asDouble()); Optional<String> pemDeployKey = optionalString(root.field(pemDeployKeyField)); List<AssignedRotation> assignedRotations = assignedRotationsFromSlime(deploymentSpec, root); Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField)); return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, assignedRotations, rotationStatus); } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)), applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)), Version.fromString(deploymentObject.field(versionField).asString()), Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()), 
clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)), clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)), deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)), DeploymentActivity.create(optionalInstant(deploymentObject.field(lastQueriedField)), optionalInstant(deploymentObject.field(lastWrittenField)), optionalDouble(deploymentObject.field(lastQueriesPerSecondField)), optionalDouble(deploymentObject.field(lastWritesPerSecondField)))); } private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) { Optional<Instant> instant = object.field(deploymentMetricsUpdateTime).valid() ? Optional.of(Instant.ofEpochMilli(object.field(deploymentMetricsUpdateTime).asLong())) : Optional.empty(); return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(), object.field(deploymentMetricsWPSField).asDouble(), object.field(deploymentMetricsDocsField).asDouble(), object.field(deploymentMetricsQueryLatencyField).asDouble(), object.field(deploymentMetricsWriteLatencyField).asDouble(), instant, deploymentWarningsFrom(object.field(deploymentMetricsWarningsField))); } private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) { Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name), (int) value.asLong())); return Collections.unmodifiableMap(warnings); } private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) { if (!object.valid()) { return Collections.emptyMap(); } Map<HostName, RotationStatus> rotationStatus = new TreeMap<>(); object.traverse((ArrayTraverser) (idx, inspect) -> { HostName hostname = HostName.from(inspect.field("hostname").asString()); RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString()); rotationStatus.put(hostname, status); }); return Collections.unmodifiableMap(rotationStatus); } private 
Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime (Inspector object) { Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(value))); return map; } private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) { Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>(); object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value))); return map; } private ClusterUtilization clusterUtililzationFromSlime(Inspector object) { double cpu = object.field(clusterUtilsCpuField).asDouble(); double mem = object.field(clusterUtilsMemField).asDouble(); double disk = object.field(clusterUtilsDiskField).asDouble(); double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble(); return new ClusterUtilization(mem, cpu, disk, diskBusy); } private ClusterInfo clusterInfoFromSlime(Inspector inspector) { String flavor = inspector.field(clusterInfoFlavorField).asString(); int cost = (int)inspector.field(clusterInfoCostField).asLong(); String type = inspector.field(clusterInfoTypeField).asString(); double flavorCpu = inspector.field(clusterInfoCpuField).asDouble(); double flavorMem = inspector.field(clusterInfoMemField).asDouble(); double flavorDisk = inspector.field(clusterInfoDiskField).asDouble(); List<String> hostnames = new ArrayList<>(); inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString())); return new ClusterInfo(flavor, cost, flavorCpu, flavorMem, flavorDisk, ClusterSpec.Type.from(type), hostnames); } private ZoneId zoneIdFromSlime(Inspector object) { return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString()); } private ApplicationVersion applicationVersionFromSlime(Inspector object) { if ( ! 
object.valid()) return ApplicationVersion.unknown; OptionalLong applicationBuildNumber = optionalLong(object.field(applicationBuildNumberField)); Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); if ( ! sourceRevision.isPresent() || ! applicationBuildNumber.isPresent()) { return ApplicationVersion.unknown; } Optional<String> authorEmail = optionalString(object.field(authorEmailField)); Optional<Version> compileVersion = optionalString(object.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = optionalInstant(object.field(buildTimeField)); if ( ! authorEmail.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong()); if ( ! compileVersion.isPresent() || ! buildTime.isPresent()) return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get()); return ApplicationVersion.from(sourceRevision.get(), applicationBuildNumber.getAsLong(), authorEmail.get(), compileVersion.get(), buildTime.get()); } private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private DeploymentJobs deploymentJobsFromSlime(Inspector object) { OptionalLong projectId = optionalLong(object.field(projectIdField)); List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField)); Optional<IssueId> issueId = optionalString(object.field(issueIdField)).map(IssueId::from); boolean builtInternally = object.field(builtInternallyField).asBool(); return new DeploymentJobs(projectId, jobStatusList, issueId, builtInternally); } private Change changeFromSlime(Inspector object) { if ( ! 
object.valid()) return Change.empty(); Inspector versionFieldValue = object.field(versionField); Change change = Change.empty(); if (versionFieldValue.valid()) change = Change.of(Version.fromString(versionFieldValue.asString())); if (object.field(applicationBuildNumberField).valid()) change = change.with(applicationVersionFromSlime(object)); if (object.field(pinnedField).asBool()) change = change.withPin(); return change; } private List<JobStatus> jobStatusListFromSlime(Inspector array) { List<JobStatus> jobStatusList = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> jobStatusFromSlime(item).ifPresent(jobStatusList::add)); return jobStatusList; } private Optional<JobStatus> jobStatusFromSlime(Inspector object) { Optional<JobType> jobType = JobType.fromOptionalJobName(object.field(jobTypeField).asString()); if (! jobType.isPresent()) return Optional.empty(); Optional<JobError> jobError = Optional.empty(); if (object.field(errorField).valid()) jobError = Optional.of(JobError.valueOf(object.field(errorField).asString())); return Optional.of(new JobStatus(jobType.get(), jobError, jobRunFromSlime(object.field(lastTriggeredField)), jobRunFromSlime(object.field(lastCompletedField)), jobRunFromSlime(object.field(firstFailingField)), jobRunFromSlime(object.field(lastSuccessField)), optionalLong(object.field(pausedUntilField)))); } private Optional<JobStatus.JobRun> jobRunFromSlime(Inspector object) { if ( ! 
object.valid()) return Optional.empty(); return Optional.of(new JobStatus.JobRun(object.field(jobRunIdField).asLong(), new Version(object.field(versionField).asString()), applicationVersionFromSlime(object.field(revisionField)), optionalString(object.field(sourceVersionField)).map(Version::fromString), Optional.of(object.field(sourceApplicationField)).filter(Inspector::valid).map(this::applicationVersionFromSlime), object.field(reasonField).asString(), Instant.ofEpochMilli(object.field(atField).asLong()))); } private List<AssignedRotation> assignedRotationsFromSlime(DeploymentSpec deploymentSpec, Inspector root) { final var assignedRotations = new LinkedHashSet<AssignedRotation>(); final var legacyRotation = legacyRotationFromSlime(root.field(deprecatedRotationField)); if (legacyRotation.isPresent() && deploymentSpec.globalServiceId().isPresent()) { final var clusterId = new ClusterSpec.Id(deploymentSpec.globalServiceId().get()); assignedRotations.add(new AssignedRotation(clusterId, EndpointId.default_(), legacyRotation.get())); } final var rotations = rotationListFromSlime(root.field(rotationsField)); for (var rotation : rotations) { if (deploymentSpec.globalServiceId().isPresent()) { final var clusterId = new ClusterSpec.Id(deploymentSpec.globalServiceId().get()); assignedRotations.add(new AssignedRotation(clusterId, EndpointId.default_(), rotation)); } } root.field(assignedRotationsField).traverse((ArrayTraverser) (idx, inspector) -> { final var clusterId = new ClusterSpec.Id(inspector.field(assignedRotationClusterField).asString()); final var endpointId = EndpointId.of(inspector.field(assignedRotationEndpointField).asString()); final var rotationId = new RotationId(inspector.field(assignedRotationRotationField).asString()); assignedRotations.add(new AssignedRotation(clusterId, endpointId, rotationId)); }); return List.copyOf(assignedRotations); } private List<RotationId> rotationListFromSlime(Inspector field) { final var rotations = new ArrayList<RotationId>(); 
field.traverse((ArrayTraverser) (idx, inspector) -> { final var rotation = new RotationId(inspector.asString()); rotations.add(rotation); }); return rotations; } private Optional<RotationId> legacyRotationFromSlime(Inspector field) { return field.valid() ? optionalString(field).map(RotationId::new) : Optional.empty(); } private OptionalLong optionalLong(Inspector field) { return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty(); } private OptionalInt optionalInteger(Inspector field) { return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty(); } private OptionalDouble optionalDouble(Inspector field) { return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty(); } private Optional<String> optionalString(Inspector field) { return SlimeUtils.optionalString(field); } private Optional<Instant> optionalInstant(Inspector field) { OptionalLong value = optionalLong(field); return value.isPresent() ? Optional.of(Instant.ofEpochMilli(value.getAsLong())) : Optional.empty(); } }
Regardless, I manually reformatted the lines.
/** Returns a copy of this with the assigned rotations replaced by the given list; all other fields are carried over. */
public LockedApplication with(List<AssignedRotation> assignedRotations) {
    return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                 deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
                                 metrics, pemDeployKey, assignedRotations, rotationStatus);
}
deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
/**
 * Returns a copy of this with the assigned rotations replaced by the given list; all other fields are carried over.
 * NOTE(review): this is byte-identical to the with(List&lt;AssignedRotation&gt;) overload appearing earlier in this
 * file — two identical signatures cannot coexist in one class; confirm which copy should be kept.
 */
public LockedApplication with(List<AssignedRotation> assignedRotations) {
    return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                 deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
                                 metrics, pemDeployKey, assignedRotations, rotationStatus);
}
class LockedApplication { private final Lock lock; private final ApplicationId id; private final Instant createdAt; private final DeploymentSpec deploymentSpec; private final ValidationOverrides validationOverrides; private final Map<ZoneId, Deployment> deployments; private final DeploymentJobs deploymentJobs; private final Change change; private final Change outstandingChange; private final Optional<IssueId> ownershipIssueId; private final Optional<User> owner; private final OptionalInt majorVersion; private final ApplicationMetrics metrics; private final Optional<String> pemDeployKey; private final List<AssignedRotation> rotations; private final Map<HostName, RotationStatus> rotationStatus; /** * Used to create a locked application * * @param application The application to lock. * @param lock The lock for the application. */ LockedApplication(Application application, Lock lock) { this(Objects.requireNonNull(lock, "lock cannot be null"), application.id(), application.createdAt(), application.deploymentSpec(), application.validationOverrides(), application.deployments(), application.deploymentJobs(), application.change(), application.outstandingChange(), application.ownershipIssueId(), application.owner(), application.majorVersion(), application.metrics(), application.pemDeployKey(), application.assignedRotations(), application.rotationStatus()); } private LockedApplication(Lock lock, ApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides, Map<ZoneId, Deployment> deployments, DeploymentJobs deploymentJobs, Change change, Change outstandingChange, Optional<IssueId> ownershipIssueId, Optional<User> owner, OptionalInt majorVersion, ApplicationMetrics metrics, Optional<String> pemDeployKey, List<AssignedRotation> rotations, Map<HostName, RotationStatus> rotationStatus) { this.lock = lock; this.id = id; this.createdAt = createdAt; this.deploymentSpec = deploymentSpec; this.validationOverrides = validationOverrides; 
this.deployments = deployments; this.deploymentJobs = deploymentJobs; this.change = change; this.outstandingChange = outstandingChange; this.ownershipIssueId = ownershipIssueId; this.owner = owner; this.majorVersion = majorVersion; this.metrics = metrics; this.pemDeployKey = pemDeployKey; this.rotations = rotations; this.rotationStatus = rotationStatus; } /** Returns a read-only copy of this */ public Application get() { return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withBuiltInternally(boolean builtInternally) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withBuiltInternally(builtInternally), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withProjectId(OptionalLong projectId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withProjectId(projectId), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withDeploymentIssueId(IssueId issueId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.with(issueId), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withJobPause(JobType jobType, OptionalLong pausedUntil) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withPause(jobType, pausedUntil), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public 
LockedApplication withJobCompletion(long projectId, JobType jobType, JobStatus.JobRun completion, Optional<DeploymentJobs.JobError> jobError) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withCompletion(projectId, jobType, completion, jobError), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withJobTriggering(JobType jobType, JobStatus.JobRun job) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withTriggering(jobType, job), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withNewDeployment(ZoneId zone, ApplicationVersion applicationVersion, Version version, Instant instant, Map<DeploymentMetrics.Warning, Integer> warnings) { Deployment previousDeployment = deployments.getOrDefault(zone, new Deployment(zone, applicationVersion, version, instant)); Deployment newDeployment = new Deployment(zone, applicationVersion, version, instant, previousDeployment.clusterUtils(), previousDeployment.clusterInfo(), previousDeployment.metrics().with(warnings), previousDeployment.activity()); return with(newDeployment); } public LockedApplication withClusterUtilization(ZoneId zone, Map<ClusterSpec.Id, ClusterUtilization> clusterUtilization) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withClusterUtils(clusterUtilization)); } public LockedApplication withClusterInfo(ZoneId zone, Map<ClusterSpec.Id, ClusterInfo> clusterInfo) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withClusterInfo(clusterInfo)); } public LockedApplication recordActivityAt(Instant instant, ZoneId zone) { Deployment deployment = deployments.get(zone); if (deployment 
== null) return this; return with(deployment.recordActivityAt(instant)); } public LockedApplication with(ZoneId zone, DeploymentMetrics deploymentMetrics) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withMetrics(deploymentMetrics)); } public LockedApplication withoutDeploymentIn(ZoneId zone) { Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments); deployments.remove(zone); return with(deployments); } public LockedApplication withoutDeploymentJob(JobType jobType) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.without(jobType), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(DeploymentSpec deploymentSpec) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(ValidationOverrides validationOverrides) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withChange(Change change) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withOutstandingChange(Change outstandingChange) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, 
rotationStatus); } public LockedApplication withOwnershipIssueId(IssueId issueId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, Optional.ofNullable(issueId), owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withOwner(User owner) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, Optional.ofNullable(owner), majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } /** Set a major version for this, or set to null to remove any major version override */ public LockedApplication withMajorVersion(Integer majorVersion) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion == null ? OptionalInt.empty() : OptionalInt.of(majorVersion), metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(MetricsService.ApplicationMetrics metrics) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withPemDeployKey(String pemDeployKey) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, Optional.ofNullable(pemDeployKey), rotations, rotationStatus); } public LockedApplication withRotationStatus(Map<HostName, RotationStatus> rotationStatus) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, 
metrics, pemDeployKey, rotations, rotationStatus); } /** Don't expose non-leaf sub-objects. */ private LockedApplication with(Deployment deployment) { Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments); deployments.put(deployment.zone(), deployment); return with(deployments); } private LockedApplication with(Map<ZoneId, Deployment> deployments) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } @Override public String toString() { return "application '" + id + "'"; } }
class LockedApplication { private final Lock lock; private final ApplicationId id; private final Instant createdAt; private final DeploymentSpec deploymentSpec; private final ValidationOverrides validationOverrides; private final Map<ZoneId, Deployment> deployments; private final DeploymentJobs deploymentJobs; private final Change change; private final Change outstandingChange; private final Optional<IssueId> ownershipIssueId; private final Optional<User> owner; private final OptionalInt majorVersion; private final ApplicationMetrics metrics; private final Optional<String> pemDeployKey; private final List<AssignedRotation> rotations; private final Map<HostName, RotationStatus> rotationStatus; /** * Used to create a locked application * * @param application The application to lock. * @param lock The lock for the application. */ LockedApplication(Application application, Lock lock) { this(Objects.requireNonNull(lock, "lock cannot be null"), application.id(), application.createdAt(), application.deploymentSpec(), application.validationOverrides(), application.deployments(), application.deploymentJobs(), application.change(), application.outstandingChange(), application.ownershipIssueId(), application.owner(), application.majorVersion(), application.metrics(), application.pemDeployKey(), application.assignedRotations(), application.rotationStatus()); } private LockedApplication(Lock lock, ApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides, Map<ZoneId, Deployment> deployments, DeploymentJobs deploymentJobs, Change change, Change outstandingChange, Optional<IssueId> ownershipIssueId, Optional<User> owner, OptionalInt majorVersion, ApplicationMetrics metrics, Optional<String> pemDeployKey, List<AssignedRotation> rotations, Map<HostName, RotationStatus> rotationStatus) { this.lock = lock; this.id = id; this.createdAt = createdAt; this.deploymentSpec = deploymentSpec; this.validationOverrides = validationOverrides; 
this.deployments = deployments; this.deploymentJobs = deploymentJobs; this.change = change; this.outstandingChange = outstandingChange; this.ownershipIssueId = ownershipIssueId; this.owner = owner; this.majorVersion = majorVersion; this.metrics = metrics; this.pemDeployKey = pemDeployKey; this.rotations = rotations; this.rotationStatus = rotationStatus; } /** Returns a read-only copy of this */ public Application get() { return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withBuiltInternally(boolean builtInternally) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withBuiltInternally(builtInternally), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withProjectId(OptionalLong projectId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withProjectId(projectId), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withDeploymentIssueId(IssueId issueId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.with(issueId), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withJobPause(JobType jobType, OptionalLong pausedUntil) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withPause(jobType, pausedUntil), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public 
LockedApplication withJobCompletion(long projectId, JobType jobType, JobStatus.JobRun completion, Optional<DeploymentJobs.JobError> jobError) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withCompletion(projectId, jobType, completion, jobError), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withJobTriggering(JobType jobType, JobStatus.JobRun job) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withTriggering(jobType, job), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withNewDeployment(ZoneId zone, ApplicationVersion applicationVersion, Version version, Instant instant, Map<DeploymentMetrics.Warning, Integer> warnings) { Deployment previousDeployment = deployments.getOrDefault(zone, new Deployment(zone, applicationVersion, version, instant)); Deployment newDeployment = new Deployment(zone, applicationVersion, version, instant, previousDeployment.clusterUtils(), previousDeployment.clusterInfo(), previousDeployment.metrics().with(warnings), previousDeployment.activity()); return with(newDeployment); } public LockedApplication withClusterUtilization(ZoneId zone, Map<ClusterSpec.Id, ClusterUtilization> clusterUtilization) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withClusterUtils(clusterUtilization)); } public LockedApplication withClusterInfo(ZoneId zone, Map<ClusterSpec.Id, ClusterInfo> clusterInfo) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withClusterInfo(clusterInfo)); } public LockedApplication recordActivityAt(Instant instant, ZoneId zone) { Deployment deployment = deployments.get(zone); if (deployment 
== null) return this; return with(deployment.recordActivityAt(instant)); } public LockedApplication with(ZoneId zone, DeploymentMetrics deploymentMetrics) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withMetrics(deploymentMetrics)); } public LockedApplication withoutDeploymentIn(ZoneId zone) { Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments); deployments.remove(zone); return with(deployments); } public LockedApplication withoutDeploymentJob(JobType jobType) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.without(jobType), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(DeploymentSpec deploymentSpec) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(ValidationOverrides validationOverrides) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withChange(Change change) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withOutstandingChange(Change outstandingChange) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, 
rotationStatus); } public LockedApplication withOwnershipIssueId(IssueId issueId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, Optional.ofNullable(issueId), owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withOwner(User owner) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, Optional.ofNullable(owner), majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } /** Set a major version for this, or set to null to remove any major version override */ public LockedApplication withMajorVersion(Integer majorVersion) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion == null ? OptionalInt.empty() : OptionalInt.of(majorVersion), metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(MetricsService.ApplicationMetrics metrics) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withPemDeployKey(String pemDeployKey) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, Optional.ofNullable(pemDeployKey), rotations, rotationStatus); } public LockedApplication withRotationStatus(Map<HostName, RotationStatus> rotationStatus) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, 
metrics, pemDeployKey, rotations, rotationStatus); } /** Don't expose non-leaf sub-objects. */ private LockedApplication with(Deployment deployment) { Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments); deployments.put(deployment.zone(), deployment); return with(deployments); } private LockedApplication with(Map<ZoneId, Deployment> deployments) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } @Override public String toString() { return "application '" + id + "'"; } }
> I would rather IntelliJ (or some plugin) deal with alignment instead of me. For this particular case I believe it's: Editor -> Code Style -> Java -> Method call arguments -> Align when multiline.
public LockedApplication with(List<AssignedRotation> assignedRotations) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, assignedRotations, rotationStatus); }
deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion,
public LockedApplication with(List<AssignedRotation> assignedRotations) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, assignedRotations, rotationStatus); }
class LockedApplication { private final Lock lock; private final ApplicationId id; private final Instant createdAt; private final DeploymentSpec deploymentSpec; private final ValidationOverrides validationOverrides; private final Map<ZoneId, Deployment> deployments; private final DeploymentJobs deploymentJobs; private final Change change; private final Change outstandingChange; private final Optional<IssueId> ownershipIssueId; private final Optional<User> owner; private final OptionalInt majorVersion; private final ApplicationMetrics metrics; private final Optional<String> pemDeployKey; private final List<AssignedRotation> rotations; private final Map<HostName, RotationStatus> rotationStatus; /** * Used to create a locked application * * @param application The application to lock. * @param lock The lock for the application. */ LockedApplication(Application application, Lock lock) { this(Objects.requireNonNull(lock, "lock cannot be null"), application.id(), application.createdAt(), application.deploymentSpec(), application.validationOverrides(), application.deployments(), application.deploymentJobs(), application.change(), application.outstandingChange(), application.ownershipIssueId(), application.owner(), application.majorVersion(), application.metrics(), application.pemDeployKey(), application.assignedRotations(), application.rotationStatus()); } private LockedApplication(Lock lock, ApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides, Map<ZoneId, Deployment> deployments, DeploymentJobs deploymentJobs, Change change, Change outstandingChange, Optional<IssueId> ownershipIssueId, Optional<User> owner, OptionalInt majorVersion, ApplicationMetrics metrics, Optional<String> pemDeployKey, List<AssignedRotation> rotations, Map<HostName, RotationStatus> rotationStatus) { this.lock = lock; this.id = id; this.createdAt = createdAt; this.deploymentSpec = deploymentSpec; this.validationOverrides = validationOverrides; 
this.deployments = deployments; this.deploymentJobs = deploymentJobs; this.change = change; this.outstandingChange = outstandingChange; this.ownershipIssueId = ownershipIssueId; this.owner = owner; this.majorVersion = majorVersion; this.metrics = metrics; this.pemDeployKey = pemDeployKey; this.rotations = rotations; this.rotationStatus = rotationStatus; } /** Returns a read-only copy of this */ public Application get() { return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withBuiltInternally(boolean builtInternally) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withBuiltInternally(builtInternally), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withProjectId(OptionalLong projectId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withProjectId(projectId), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withDeploymentIssueId(IssueId issueId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.with(issueId), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withJobPause(JobType jobType, OptionalLong pausedUntil) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withPause(jobType, pausedUntil), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public 
LockedApplication withJobCompletion(long projectId, JobType jobType, JobStatus.JobRun completion, Optional<DeploymentJobs.JobError> jobError) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withCompletion(projectId, jobType, completion, jobError), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withJobTriggering(JobType jobType, JobStatus.JobRun job) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.withTriggering(jobType, job), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withNewDeployment(ZoneId zone, ApplicationVersion applicationVersion, Version version, Instant instant, Map<DeploymentMetrics.Warning, Integer> warnings) { Deployment previousDeployment = deployments.getOrDefault(zone, new Deployment(zone, applicationVersion, version, instant)); Deployment newDeployment = new Deployment(zone, applicationVersion, version, instant, previousDeployment.clusterUtils(), previousDeployment.clusterInfo(), previousDeployment.metrics().with(warnings), previousDeployment.activity()); return with(newDeployment); } public LockedApplication withClusterUtilization(ZoneId zone, Map<ClusterSpec.Id, ClusterUtilization> clusterUtilization) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withClusterUtils(clusterUtilization)); } public LockedApplication withClusterInfo(ZoneId zone, Map<ClusterSpec.Id, ClusterInfo> clusterInfo) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withClusterInfo(clusterInfo)); } public LockedApplication recordActivityAt(Instant instant, ZoneId zone) { Deployment deployment = deployments.get(zone); if (deployment 
== null) return this; return with(deployment.recordActivityAt(instant)); } public LockedApplication with(ZoneId zone, DeploymentMetrics deploymentMetrics) { Deployment deployment = deployments.get(zone); if (deployment == null) return this; return with(deployment.withMetrics(deploymentMetrics)); } public LockedApplication withoutDeploymentIn(ZoneId zone) { Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments); deployments.remove(zone); return with(deployments); } public LockedApplication withoutDeploymentJob(JobType jobType) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs.without(jobType), change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(DeploymentSpec deploymentSpec) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(ValidationOverrides validationOverrides) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withChange(Change change) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withOutstandingChange(Change outstandingChange) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, 
rotationStatus); } public LockedApplication withOwnershipIssueId(IssueId issueId) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, Optional.ofNullable(issueId), owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withOwner(User owner) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, Optional.ofNullable(owner), majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } /** Set a major version for this, or set to null to remove any major version override */ public LockedApplication withMajorVersion(Integer majorVersion) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion == null ? OptionalInt.empty() : OptionalInt.of(majorVersion), metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication with(MetricsService.ApplicationMetrics metrics) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } public LockedApplication withPemDeployKey(String pemDeployKey) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, Optional.ofNullable(pemDeployKey), rotations, rotationStatus); } public LockedApplication withRotationStatus(Map<HostName, RotationStatus> rotationStatus) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, 
metrics, pemDeployKey, rotations, rotationStatus); } /** Don't expose non-leaf sub-objects. */ private LockedApplication with(Deployment deployment) { Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments); deployments.put(deployment.zone(), deployment); return with(deployments); } private LockedApplication with(Map<ZoneId, Deployment> deployments) { return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics, pemDeployKey, rotations, rotationStatus); } @Override public String toString() { return "application '" + id + "'"; } }
/**
 * An {@link Application} that has been locked for modification. Every mutator ({@code withX})
 * returns a new LockedApplication carrying the same lock with one field replaced, and
 * {@link #get()} produces the resulting read-only Application. Instances are immutable; the
 * lock is carried along but never released by this class (presumably the creator releases it —
 * confirm against the caller).
 */
class LockedApplication {

    private final Lock lock;
    private final ApplicationId id;
    private final Instant createdAt;
    private final DeploymentSpec deploymentSpec;
    private final ValidationOverrides validationOverrides;
    private final Map<ZoneId, Deployment> deployments;
    private final DeploymentJobs deploymentJobs;
    private final Change change;
    private final Change outstandingChange;
    private final Optional<IssueId> ownershipIssueId;
    private final Optional<User> owner;
    private final OptionalInt majorVersion;
    private final ApplicationMetrics metrics;
    private final Optional<String> pemDeployKey;
    private final List<AssignedRotation> rotations;
    private final Map<HostName, RotationStatus> rotationStatus;

    /**
     * Used to create a locked application
     *
     * @param application The application to lock.
     * @param lock The lock for the application.
     */
    LockedApplication(Application application, Lock lock) {
        this(Objects.requireNonNull(lock, "lock cannot be null"), application.id(), application.createdAt(),
             application.deploymentSpec(), application.validationOverrides(), application.deployments(),
             application.deploymentJobs(), application.change(), application.outstandingChange(),
             application.ownershipIssueId(), application.owner(), application.majorVersion(),
             application.metrics(), application.pemDeployKey(), application.assignedRotations(),
             application.rotationStatus());
    }

    // Canonical constructor: all withX methods funnel through this, replacing one field at a time.
    private LockedApplication(Lock lock, ApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec,
                              ValidationOverrides validationOverrides, Map<ZoneId, Deployment> deployments,
                              DeploymentJobs deploymentJobs, Change change, Change outstandingChange,
                              Optional<IssueId> ownershipIssueId, Optional<User> owner, OptionalInt majorVersion,
                              ApplicationMetrics metrics, Optional<String> pemDeployKey,
                              List<AssignedRotation> rotations,
                              Map<HostName, RotationStatus> rotationStatus) {
        this.lock = lock;
        this.id = id;
        this.createdAt = createdAt;
        this.deploymentSpec = deploymentSpec;
        this.validationOverrides = validationOverrides;
        this.deployments = deployments;
        this.deploymentJobs = deploymentJobs;
        this.change = change;
        this.outstandingChange = outstandingChange;
        this.ownershipIssueId = ownershipIssueId;
        this.owner = owner;
        this.majorVersion = majorVersion;
        this.metrics = metrics;
        this.pemDeployKey = pemDeployKey;
        this.rotations = rotations;
        this.rotationStatus = rotationStatus;
    }

    /** Returns a read-only copy of this */
    public Application get() {
        return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs,
                               change, outstandingChange, ownershipIssueId, owner, majorVersion, metrics,
                               pemDeployKey, rotations, rotationStatus);
    }

    /** Returns a copy whose deployment jobs carry the given built-internally flag. */
    public LockedApplication withBuiltInternally(boolean builtInternally) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs.withBuiltInternally(builtInternally), change, outstandingChange,
                                     ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
                                     rotations, rotationStatus);
    }

    /** Returns a copy whose deployment jobs carry the given project id. */
    public LockedApplication withProjectId(OptionalLong projectId) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs.withProjectId(projectId), change, outstandingChange,
                                     ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
                                     rotations, rotationStatus);
    }

    /** Returns a copy whose deployment jobs carry the given deployment issue id. */
    public LockedApplication withDeploymentIssueId(IssueId issueId) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs.with(issueId), change, outstandingChange,
                                     ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
                                     rotations, rotationStatus);
    }

    /** Returns a copy where the given job type is paused until the given instant. */
    public LockedApplication withJobPause(JobType jobType, OptionalLong pausedUntil) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs.withPause(jobType, pausedUntil), change, outstandingChange,
                                     ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
                                     rotations, rotationStatus);
    }

    /** Returns a copy with the given job run recorded as completed (with an optional error). */
    public LockedApplication withJobCompletion(long projectId, JobType jobType, JobStatus.JobRun completion,
                                               Optional<DeploymentJobs.JobError> jobError) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs.withCompletion(projectId, jobType, completion, jobError),
                                     change, outstandingChange, ownershipIssueId, owner, majorVersion,
                                     metrics, pemDeployKey, rotations, rotationStatus);
    }

    /** Returns a copy with the given job run recorded as triggered. */
    public LockedApplication withJobTriggering(JobType jobType, JobStatus.JobRun job) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs.withTriggering(jobType, job), change, outstandingChange,
                                     ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
                                     rotations, rotationStatus);
    }

    /**
     * Returns a copy with a new deployment in the given zone, preserving cluster utilization,
     * cluster info, activity and (warning-updated) metrics from any existing deployment there.
     */
    public LockedApplication withNewDeployment(ZoneId zone, ApplicationVersion applicationVersion, Version version,
                                               Instant instant, Map<DeploymentMetrics.Warning, Integer> warnings) {
        // Use an empty deployment as default to copy and preserve other information when
        // a deployment already exists in this zone.
        Deployment previousDeployment = deployments.getOrDefault(zone, new Deployment(zone, applicationVersion,
                                                                                      version, instant));
        Deployment newDeployment = new Deployment(zone, applicationVersion, version, instant,
                                                  previousDeployment.clusterUtils(),
                                                  previousDeployment.clusterInfo(),
                                                  previousDeployment.metrics().with(warnings),
                                                  previousDeployment.activity());
        return with(newDeployment);
    }

    /** Returns a copy with updated cluster utilization in the given zone; no-op if not deployed there. */
    public LockedApplication withClusterUtilization(ZoneId zone, Map<ClusterSpec.Id, ClusterUtilization> clusterUtilization) {
        Deployment deployment = deployments.get(zone);
        if (deployment == null) return this;    // No longer deployed in this zone.
        return with(deployment.withClusterUtils(clusterUtilization));
    }

    /** Returns a copy with updated cluster info in the given zone; no-op if not deployed there. */
    public LockedApplication withClusterInfo(ZoneId zone, Map<ClusterSpec.Id, ClusterInfo> clusterInfo) {
        Deployment deployment = deployments.get(zone);
        if (deployment == null) return this;    // No longer deployed in this zone.
        return with(deployment.withClusterInfo(clusterInfo));
    }

    /** Returns a copy with activity recorded at the given instant in the given zone; no-op if not deployed there. */
    public LockedApplication recordActivityAt(Instant instant, ZoneId zone) {
        Deployment deployment = deployments.get(zone);
        if (deployment == null) return this;
        return with(deployment.recordActivityAt(instant));
    }

    /** Returns a copy with updated deployment metrics in the given zone; no-op if not deployed there. */
    public LockedApplication with(ZoneId zone, DeploymentMetrics deploymentMetrics) {
        Deployment deployment = deployments.get(zone);
        if (deployment == null) return this;    // No longer deployed in this zone.
        return with(deployment.withMetrics(deploymentMetrics));
    }

    /** Returns a copy without any deployment in the given zone. */
    public LockedApplication withoutDeploymentIn(ZoneId zone) {
        Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments);
        deployments.remove(zone);
        return with(deployments);
    }

    /** Returns a copy without the given job type in its deployment jobs. */
    public LockedApplication withoutDeploymentJob(JobType jobType) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs.without(jobType), change, outstandingChange,
                                     ownershipIssueId, owner, majorVersion, metrics, pemDeployKey,
                                     rotations, rotationStatus);
    }

    /** Returns a copy with the given deployment spec. */
    public LockedApplication with(DeploymentSpec deploymentSpec) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    /** Returns a copy with the given validation overrides. */
    public LockedApplication with(ValidationOverrides validationOverrides) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    /** Returns a copy with the given change under deployment. */
    public LockedApplication withChange(Change change) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    /** Returns a copy with the given outstanding change. */
    public LockedApplication withOutstandingChange(Change outstandingChange) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    /** Returns a copy with the given ownership issue id; null clears it. */
    public LockedApplication withOwnershipIssueId(IssueId issueId) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, Optional.ofNullable(issueId), owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    /** Returns a copy with the given owner; null clears it. */
    public LockedApplication withOwner(User owner) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId,
                                     Optional.ofNullable(owner), majorVersion, metrics, pemDeployKey,
                                     rotations, rotationStatus);
    }

    /** Set a major version for this, or set to null to remove any major version override */
    public LockedApplication withMajorVersion(Integer majorVersion) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion == null ? OptionalInt.empty() : OptionalInt.of(majorVersion),
                                     metrics, pemDeployKey, rotations, rotationStatus);
    }

    /** Returns a copy with the given application metrics. */
    public LockedApplication with(MetricsService.ApplicationMetrics metrics) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    /** Returns a copy with the given PEM deploy key; null clears it. */
    public LockedApplication withPemDeployKey(String pemDeployKey) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, Optional.ofNullable(pemDeployKey),
                                     rotations, rotationStatus);
    }

    /** Returns a copy with the given per-host rotation status. */
    public LockedApplication withRotationStatus(Map<HostName, RotationStatus> rotationStatus) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    /** Don't expose non-leaf sub-objects. */
    private LockedApplication with(Deployment deployment) {
        Map<ZoneId, Deployment> deployments = new LinkedHashMap<>(this.deployments);
        deployments.put(deployment.zone(), deployment);
        return with(deployments);
    }

    // Internal: replace the whole deployments map.
    private LockedApplication with(Map<ZoneId, Deployment> deployments) {
        return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
                                     deploymentJobs, change, outstandingChange, ownershipIssueId, owner,
                                     majorVersion, metrics, pemDeployKey, rotations, rotationStatus);
    }

    @Override
    public String toString() {
        return "application '" + id + "'";
    }

}
`DeploymentSpec` already enforces this.
public List<AssignedRotation> getOrAssignRotations(Application application, RotationLock lock) { if (application.deploymentSpec().globalServiceId().isPresent() && ! application.deploymentSpec().endpoints().isEmpty()) { throw new IllegalArgumentException("Cannot provision rotations with both global-service-id and 'endpoints'"); } if (application.deploymentSpec().globalServiceId().isPresent()) { final var regions = application.deploymentSpec().zones().stream() .flatMap(zone -> zone.region().stream()) .collect(Collectors.toSet()); final var rotation = getOrAssignRotation(application, lock); return List.of( new AssignedRotation( new ClusterSpec.Id(application.deploymentSpec().globalServiceId().get()), EndpointId.default_(), rotation.id(), regions ) ); } final var availableRotations = new ArrayList<>(availableRotations(lock).values()); final var assignments = application.assignedRotations().stream() .collect( Collectors.toMap( AssignedRotation::endpointId, Function.identity(), (a, b) -> { throw new IllegalStateException("Duplicate entries: " + a + ", " + b); }, LinkedHashMap::new ) ); application.deploymentSpec().endpoints().stream() .filter(endpoint -> ! assignments.containsKey(new EndpointId(endpoint.endpointId()))) .map(endpoint -> { return new AssignedRotation( new ClusterSpec.Id(endpoint.containerId()), EndpointId.of(endpoint.endpointId()), availableRotations.remove(0).id(), endpoint.regions() ); }) .forEach(assignment -> { assignments.put(assignment.endpointId(), assignment); }); return List.copyOf(assignments.values()); }
if (application.deploymentSpec().globalServiceId().isPresent() && ! application.deploymentSpec().endpoints().isEmpty()) {
public List<AssignedRotation> getOrAssignRotations(Application application, RotationLock lock) { if (application.deploymentSpec().globalServiceId().isPresent() && ! application.deploymentSpec().endpoints().isEmpty()) { throw new IllegalArgumentException("Cannot provision rotations with both global-service-id and 'endpoints'"); } if (application.deploymentSpec().globalServiceId().isPresent()) { final var regions = application.deploymentSpec().zones().stream() .filter(zone -> zone.environment().isProduction()) .flatMap(zone -> zone.region().stream()) .collect(Collectors.toSet()); final var rotation = getOrAssignRotation(application, lock); return List.of( new AssignedRotation( new ClusterSpec.Id(application.deploymentSpec().globalServiceId().get()), EndpointId.default_(), rotation.id(), regions ) ); } final Map<EndpointId, AssignedRotation> existingAssignments = existingEndpointAssignments(application); final Map<EndpointId, AssignedRotation> updatedAssignments = assignRotationsToEndpoints(application, existingAssignments, lock); existingAssignments.putAll(updatedAssignments); return List.copyOf(existingAssignments.values()); }
/**
 * Manages the pool of global rotations and their assignment to applications.
 *
 * All rotations are read from config at construction time; which rotations are free is derived
 * from the applications known to the ApplicationController. Callers that assign rotations must
 * hold the lock obtained from {@link #lock()}.
 */
class RotationRepository {

    private static final Logger log = Logger.getLogger(RotationRepository.class.getName());

    /** All rotations known to this system, keyed and sorted by rotation ID; immutable. */
    private final Map<RotationId, Rotation> allRotations;

    private final ApplicationController applications;
    private final CuratorDb curator;

    public RotationRepository(RotationsConfig rotationsConfig, ApplicationController applications, CuratorDb curator) {
        this.allRotations = from(rotationsConfig);
        this.applications = applications;
        this.curator = curator;
    }

    /** Acquire an exclusive lock for this */
    public RotationLock lock() { return new RotationLock(curator.lockRotations()); }

    /** Get rotation for given application */
    public Optional<Rotation> getRotation(Application application) {
        return application.rotations().stream().map(allRotations::get).findFirst();
    }

    /** Get rotation for the given rotationId */
    public Optional<Rotation> getRotation(RotationId rotationId) {
        // FIX: the map lookup may miss; Optional.of(null) would throw NPE, ofNullable yields empty.
        return Optional.ofNullable(allRotations.get(rotationId));
    }

    /**
     * Returns a rotation for the given application
     *
     * If a rotation is already assigned to the application, that rotation will be returned.
     * If no rotation is assigned, return an available rotation. The caller is responsible for
     * assigning the rotation.
     *
     * @param application The application requesting a rotation
     * @param lock Lock which must be acquired by the caller
     */
    public Rotation getOrAssignRotation(Application application, RotationLock lock) {
        if (! application.rotations().isEmpty()) {
            return allRotations.get(application.rotations().get(0));
        }
        if (application.deploymentSpec().globalServiceId().isEmpty()) {
            throw new IllegalArgumentException("global-service-id is not set in deployment spec");
        }
        long productionZones = application.deploymentSpec().zones().stream()
                                          .filter(zone -> zone.deploysTo(Environment.prod))
                                          .count();
        if (productionZones < 2) {
            throw new IllegalArgumentException("global-service-id is set but less than 2 prod zones are defined");
        }
        return findAvailableRotation(application, lock);
    }

    /**
     * Returns all unassigned rotations
     *
     * @param lock Lock which must be acquired by the caller
     */
    public Map<RotationId, Rotation> availableRotations(@SuppressWarnings("unused") RotationLock lock) {
        List<RotationId> assignedRotations = applications.asList().stream()
                                                         .filter(application -> ! application.rotations().isEmpty())
                                                         .flatMap(application -> application.rotations().stream())
                                                         .collect(Collectors.toList());
        Map<RotationId, Rotation> unassignedRotations = new LinkedHashMap<>(this.allRotations);
        assignedRotations.forEach(unassignedRotations::remove);
        return Collections.unmodifiableMap(unassignedRotations);
    }

    /** Returns the first free rotation, or throws if the pool is empty. */
    private Rotation findAvailableRotation(Application application, RotationLock lock) {
        Map<RotationId, Rotation> availableRotations = availableRotations(lock);
        if (availableRotations.isEmpty()) {
            throw new IllegalStateException("Unable to assign global rotation to " + application.id()
                                            + " - no rotations available");
        }
        // Return first available rotation
        RotationId rotation = availableRotations.keySet().iterator().next();
        log.info(String.format("Offering %s to application %s", rotation, application.id()));
        return allRotations.get(rotation);
    }

    /** Returns an immutable map of rotation ID to rotation sorted by rotation ID */
    private static Map<RotationId, Rotation> from(RotationsConfig rotationConfig) {
        return rotationConfig.rotations().entrySet().stream()
                             .map(entry -> new Rotation(new RotationId(entry.getKey()), entry.getValue().trim()))
                             .sorted(Comparator.comparing(rotation -> rotation.id().asString()))
                             .collect(collectingAndThen(Collectors.toMap(Rotation::id,
                                                                         rotation -> rotation,
                                                                         (k, v) -> v,
                                                                         LinkedHashMap::new),
                                                        Collections::unmodifiableMap));
    }

}
/**
 * Manages the pool of global rotations and their assignment to application endpoints.
 *
 * All rotations are read from config at construction time; which rotations are free is derived
 * from the applications known to the ApplicationController. Callers that assign rotations must
 * hold the lock obtained from {@link #lock()}.
 */
class RotationRepository {

    private static final Logger log = Logger.getLogger(RotationRepository.class.getName());

    /** All rotations known to this system, keyed and sorted by rotation ID; immutable. */
    private final Map<RotationId, Rotation> allRotations;

    private final ApplicationController applications;
    private final CuratorDb curator;

    public RotationRepository(RotationsConfig rotationsConfig, ApplicationController applications, CuratorDb curator) {
        this.allRotations = from(rotationsConfig);
        this.applications = applications;
        this.curator = curator;
    }

    /** Acquire an exclusive lock for this */
    public RotationLock lock() { return new RotationLock(curator.lockRotations()); }

    /** Get rotation for given application */
    public Optional<Rotation> getRotation(Application application) {
        return application.rotations().stream().map(allRotations::get).findFirst();
    }

    /** Get rotation for the given rotationId */
    public Optional<Rotation> getRotation(RotationId rotationId) {
        // FIX: the map lookup may miss; Optional.of(null) would throw NPE, ofNullable yields empty.
        return Optional.ofNullable(allRotations.get(rotationId));
    }

    /**
     * Returns a rotation for the given application
     *
     * If a rotation is already assigned to the application, that rotation will be returned.
     * If no rotation is assigned, return an available rotation. The caller is responsible for
     * assigning the rotation.
     *
     * @param application The application requesting a rotation
     * @param lock Lock which must be acquired by the caller
     */
    public Rotation getOrAssignRotation(Application application, RotationLock lock) {
        if (! application.rotations().isEmpty()) {
            return allRotations.get(application.rotations().get(0));
        }
        if (application.deploymentSpec().globalServiceId().isEmpty()) {
            throw new IllegalArgumentException("global-service-id is not set in deployment spec");
        }
        long productionZones = application.deploymentSpec().zones().stream()
                                          .filter(zone -> zone.deploysTo(Environment.prod))
                                          .count();
        if (productionZones < 2) {
            throw new IllegalArgumentException("global-service-id is set but less than 2 prod zones are defined");
        }
        return findAvailableRotation(application, lock);
    }

    /**
     * Assigns rotations from the free pool to every endpoint in the application's deployment
     * spec that does not already have an assignment.
     *
     * (The original javadoc here described the public getOrAssignRotations method and claimed a
     * List return; corrected to match this private helper.)
     *
     * @param application the application requesting rotations
     * @param existingAssignments assignments already held by the application, keyed by endpoint
     * @param lock Lock which must be acquired by the caller
     * @return only the newly created assignments, keyed by endpoint
     */
    private Map<EndpointId, AssignedRotation> assignRotationsToEndpoints(Application application,
                                                                         Map<EndpointId, AssignedRotation> existingAssignments,
                                                                         RotationLock lock) {
        final var availableRotations = new ArrayList<>(availableRotations(lock).values());
        final var neededRotations = application.deploymentSpec().endpoints().stream()
                .filter(Predicate.not(endpoint -> existingAssignments.containsKey(EndpointId.of(endpoint.endpointId()))))
                .collect(Collectors.toSet());
        // Check the pool size up front: remove(0) below must never run dry.
        if (neededRotations.size() > availableRotations.size()) {
            throw new IllegalStateException("Hosted Vespa ran out of rotations, unable to assign rotation: need "
                                            + neededRotations.size() + ", have " + availableRotations.size());
        }
        return neededRotations.stream()
                .map(endpoint -> {
                    return new AssignedRotation(
                            new ClusterSpec.Id(endpoint.containerId()),
                            EndpointId.of(endpoint.endpointId()),
                            availableRotations.remove(0).id(),
                            endpoint.regions()
                    );
                })
                .collect(
                        Collectors.toMap(
                                AssignedRotation::endpointId,
                                Function.identity(),
                                (a, b) -> { throw new IllegalStateException("Duplicate entries:" + a + ", " + b); },
                                LinkedHashMap::new
                        )
                );
    }

    /**
     * Returns the application's current endpoint assignments, with each assignment's region set
     * refreshed from the current deployment spec (an endpoint absent from the spec gets an empty
     * region set).
     */
    private Map<EndpointId, AssignedRotation> existingEndpointAssignments(Application application) {
        // Look up the configured regions for an endpoint in the current deployment spec.
        final Function<EndpointId, Set<RegionName>> configuredRegionsForEndpoint = endpointId -> {
            return application.deploymentSpec().endpoints().stream()
                              .filter(endpoint -> endpointId.id().equals(endpoint.endpointId()))
                              .map(Endpoint::regions)
                              .findFirst()
                              .orElse(Set.of());
        };
        // Rebuild each stored assignment with its up-to-date region set.
        final Function<AssignedRotation, AssignedRotation> assignedRotationWithConfiguredRegions = assignedRotation -> {
            return new AssignedRotation(
                    assignedRotation.clusterId(),
                    assignedRotation.endpointId(),
                    assignedRotation.rotationId(),
                    configuredRegionsForEndpoint.apply(assignedRotation.endpointId())
            );
        };
        return application.assignedRotations().stream()
                .collect(
                        Collectors.toMap(
                                AssignedRotation::endpointId,
                                assignedRotationWithConfiguredRegions,
                                (a, b) -> { throw new IllegalStateException("Duplicate entries: " + a + ", " + b); },
                                LinkedHashMap::new
                        )
                );
    }

    /**
     * Returns all unassigned rotations
     *
     * @param lock Lock which must be acquired by the caller
     */
    public Map<RotationId, Rotation> availableRotations(@SuppressWarnings("unused") RotationLock lock) {
        List<RotationId> assignedRotations = applications.asList().stream()
                                                         .filter(application -> ! application.rotations().isEmpty())
                                                         .flatMap(application -> application.rotations().stream())
                                                         .collect(Collectors.toList());
        Map<RotationId, Rotation> unassignedRotations = new LinkedHashMap<>(this.allRotations);
        assignedRotations.forEach(unassignedRotations::remove);
        return Collections.unmodifiableMap(unassignedRotations);
    }

    /** Returns the first free rotation, or throws if the pool is empty. */
    private Rotation findAvailableRotation(Application application, RotationLock lock) {
        Map<RotationId, Rotation> availableRotations = availableRotations(lock);
        if (availableRotations.isEmpty()) {
            throw new IllegalStateException("Unable to assign global rotation to " + application.id()
                                            + " - no rotations available");
        }
        // Return first available rotation
        RotationId rotation = availableRotations.keySet().iterator().next();
        log.info(String.format("Offering %s to application %s", rotation, application.id()));
        return allRotations.get(rotation);
    }

    /** Returns an immutable map of rotation ID to rotation sorted by rotation ID */
    private static Map<RotationId, Rotation> from(RotationsConfig rotationConfig) {
        return rotationConfig.rotations().entrySet().stream()
                             .map(entry -> new Rotation(new RotationId(entry.getKey()), entry.getValue().trim()))
                             .sorted(Comparator.comparing(rotation -> rotation.id().asString()))
                             .collect(collectingAndThen(Collectors.toMap(Rotation::id,
                                                                         rotation -> rotation,
                                                                         (k, v) -> v,
                                                                         LinkedHashMap::new),
                                                        Collections::unmodifiableMap));
    }

}
I agree, but I think it doesn't hurt to enforce it here as well?
public List<AssignedRotation> getOrAssignRotations(Application application, RotationLock lock) { if (application.deploymentSpec().globalServiceId().isPresent() && ! application.deploymentSpec().endpoints().isEmpty()) { throw new IllegalArgumentException("Cannot provision rotations with both global-service-id and 'endpoints'"); } if (application.deploymentSpec().globalServiceId().isPresent()) { final var regions = application.deploymentSpec().zones().stream() .flatMap(zone -> zone.region().stream()) .collect(Collectors.toSet()); final var rotation = getOrAssignRotation(application, lock); return List.of( new AssignedRotation( new ClusterSpec.Id(application.deploymentSpec().globalServiceId().get()), EndpointId.default_(), rotation.id(), regions ) ); } final var availableRotations = new ArrayList<>(availableRotations(lock).values()); final var assignments = application.assignedRotations().stream() .collect( Collectors.toMap( AssignedRotation::endpointId, Function.identity(), (a, b) -> { throw new IllegalStateException("Duplicate entries: " + a + ", " + b); }, LinkedHashMap::new ) ); application.deploymentSpec().endpoints().stream() .filter(endpoint -> ! assignments.containsKey(new EndpointId(endpoint.endpointId()))) .map(endpoint -> { return new AssignedRotation( new ClusterSpec.Id(endpoint.containerId()), EndpointId.of(endpoint.endpointId()), availableRotations.remove(0).id(), endpoint.regions() ); }) .forEach(assignment -> { assignments.put(assignment.endpointId(), assignment); }); return List.copyOf(assignments.values()); }
if (application.deploymentSpec().globalServiceId().isPresent() && ! application.deploymentSpec().endpoints().isEmpty()) {
public List<AssignedRotation> getOrAssignRotations(Application application, RotationLock lock) { if (application.deploymentSpec().globalServiceId().isPresent() && ! application.deploymentSpec().endpoints().isEmpty()) { throw new IllegalArgumentException("Cannot provision rotations with both global-service-id and 'endpoints'"); } if (application.deploymentSpec().globalServiceId().isPresent()) { final var regions = application.deploymentSpec().zones().stream() .filter(zone -> zone.environment().isProduction()) .flatMap(zone -> zone.region().stream()) .collect(Collectors.toSet()); final var rotation = getOrAssignRotation(application, lock); return List.of( new AssignedRotation( new ClusterSpec.Id(application.deploymentSpec().globalServiceId().get()), EndpointId.default_(), rotation.id(), regions ) ); } final Map<EndpointId, AssignedRotation> existingAssignments = existingEndpointAssignments(application); final Map<EndpointId, AssignedRotation> updatedAssignments = assignRotationsToEndpoints(application, existingAssignments, lock); existingAssignments.putAll(updatedAssignments); return List.copyOf(existingAssignments.values()); }
/**
 * Manages the pool of global rotations and their assignment to applications.
 *
 * All rotations are read from config at construction time; which rotations are free is derived
 * from the applications known to the ApplicationController. Callers that assign rotations must
 * hold the lock obtained from {@link #lock()}.
 */
class RotationRepository {

    private static final Logger log = Logger.getLogger(RotationRepository.class.getName());

    /** All rotations known to this system, keyed and sorted by rotation ID; immutable. */
    private final Map<RotationId, Rotation> allRotations;

    private final ApplicationController applications;
    private final CuratorDb curator;

    public RotationRepository(RotationsConfig rotationsConfig, ApplicationController applications, CuratorDb curator) {
        this.allRotations = from(rotationsConfig);
        this.applications = applications;
        this.curator = curator;
    }

    /** Acquire an exclusive lock for this */
    public RotationLock lock() { return new RotationLock(curator.lockRotations()); }

    /** Get rotation for given application */
    public Optional<Rotation> getRotation(Application application) {
        return application.rotations().stream().map(allRotations::get).findFirst();
    }

    /** Get rotation for the given rotationId */
    public Optional<Rotation> getRotation(RotationId rotationId) {
        // FIX: the map lookup may miss; Optional.of(null) would throw NPE, ofNullable yields empty.
        return Optional.ofNullable(allRotations.get(rotationId));
    }

    /**
     * Returns a rotation for the given application
     *
     * If a rotation is already assigned to the application, that rotation will be returned.
     * If no rotation is assigned, return an available rotation. The caller is responsible for
     * assigning the rotation.
     *
     * @param application The application requesting a rotation
     * @param lock Lock which must be acquired by the caller
     */
    public Rotation getOrAssignRotation(Application application, RotationLock lock) {
        if (! application.rotations().isEmpty()) {
            return allRotations.get(application.rotations().get(0));
        }
        if (application.deploymentSpec().globalServiceId().isEmpty()) {
            throw new IllegalArgumentException("global-service-id is not set in deployment spec");
        }
        long productionZones = application.deploymentSpec().zones().stream()
                                          .filter(zone -> zone.deploysTo(Environment.prod))
                                          .count();
        if (productionZones < 2) {
            throw new IllegalArgumentException("global-service-id is set but less than 2 prod zones are defined");
        }
        return findAvailableRotation(application, lock);
    }

    /**
     * Returns all unassigned rotations
     *
     * @param lock Lock which must be acquired by the caller
     */
    public Map<RotationId, Rotation> availableRotations(@SuppressWarnings("unused") RotationLock lock) {
        List<RotationId> assignedRotations = applications.asList().stream()
                                                         .filter(application -> ! application.rotations().isEmpty())
                                                         .flatMap(application -> application.rotations().stream())
                                                         .collect(Collectors.toList());
        Map<RotationId, Rotation> unassignedRotations = new LinkedHashMap<>(this.allRotations);
        assignedRotations.forEach(unassignedRotations::remove);
        return Collections.unmodifiableMap(unassignedRotations);
    }

    /** Returns the first free rotation, or throws if the pool is empty. */
    private Rotation findAvailableRotation(Application application, RotationLock lock) {
        Map<RotationId, Rotation> availableRotations = availableRotations(lock);
        if (availableRotations.isEmpty()) {
            throw new IllegalStateException("Unable to assign global rotation to " + application.id()
                                            + " - no rotations available");
        }
        // Return first available rotation
        RotationId rotation = availableRotations.keySet().iterator().next();
        log.info(String.format("Offering %s to application %s", rotation, application.id()));
        return allRotations.get(rotation);
    }

    /** Returns an immutable map of rotation ID to rotation sorted by rotation ID */
    private static Map<RotationId, Rotation> from(RotationsConfig rotationConfig) {
        return rotationConfig.rotations().entrySet().stream()
                             .map(entry -> new Rotation(new RotationId(entry.getKey()), entry.getValue().trim()))
                             .sorted(Comparator.comparing(rotation -> rotation.id().asString()))
                             .collect(collectingAndThen(Collectors.toMap(Rotation::id,
                                                                         rotation -> rotation,
                                                                         (k, v) -> v,
                                                                         LinkedHashMap::new),
                                                        Collections::unmodifiableMap));
    }

}
class RotationRepository { private static final Logger log = Logger.getLogger(RotationRepository.class.getName()); private final Map<RotationId, Rotation> allRotations; private final ApplicationController applications; private final CuratorDb curator; public RotationRepository(RotationsConfig rotationsConfig, ApplicationController applications, CuratorDb curator) { this.allRotations = from(rotationsConfig); this.applications = applications; this.curator = curator; } /** Acquire a exclusive lock for this */ public RotationLock lock() { return new RotationLock(curator.lockRotations()); } /** Get rotation for given application */ public Optional<Rotation> getRotation(Application application) { return application.rotations().stream().map(allRotations::get).findFirst(); } /** Get rotation for the given rotationId */ public Optional<Rotation> getRotation(RotationId rotationId) { return Optional.of(allRotations.get(rotationId)); } /** * Returns a rotation for the given application * * If a rotation is already assigned to the application, that rotation will be returned. * If no rotation is assigned, return an available rotation. The caller is responsible for assigning the rotation. * * @param application The application requesting a rotation * @param lock Lock which must be acquired by the caller */ public Rotation getOrAssignRotation(Application application, RotationLock lock) { if (! 
application.rotations().isEmpty()) { return allRotations.get(application.rotations().get(0)); } if (application.deploymentSpec().globalServiceId().isEmpty()) { throw new IllegalArgumentException("global-service-id is not set in deployment spec"); } long productionZones = application.deploymentSpec().zones().stream() .filter(zone -> zone.deploysTo(Environment.prod)) .count(); if (productionZones < 2) { throw new IllegalArgumentException("global-service-id is set but less than 2 prod zones are defined"); } return findAvailableRotation(application, lock); } /** * Returns rotation assignments for all endpoints in application. * * If rotations are already assigned, these will be returned. * If rotations are not assigned, a new assignment will be created taking new rotations from the repository. * This method supports both global-service-id as well as the new endpoints tag. * * @param application The application requesting rotations. * @param lock Lock which by acquired by the caller * @return List of rotation assignments - either new or existing. 
*/ private Map<EndpointId, AssignedRotation> assignRotationsToEndpoints(Application application, Map<EndpointId, AssignedRotation> existingAssignments, RotationLock lock) { final var availableRotations = new ArrayList<>(availableRotations(lock).values()); final var neededRotations = application.deploymentSpec().endpoints().stream() .filter(Predicate.not(endpoint -> existingAssignments.containsKey(EndpointId.of(endpoint.endpointId())))) .collect(Collectors.toSet()); if (neededRotations.size() > availableRotations.size()) { throw new IllegalStateException("Hosted Vespa ran out of rotations, unable to assign rotation: need " + neededRotations.size() + ", have " + availableRotations.size()); } return neededRotations.stream() .map(endpoint -> { return new AssignedRotation( new ClusterSpec.Id(endpoint.containerId()), EndpointId.of(endpoint.endpointId()), availableRotations.remove(0).id(), endpoint.regions() ); }) .collect( Collectors.toMap( AssignedRotation::endpointId, Function.identity(), (a, b) -> { throw new IllegalStateException("Duplicate entries:" + a + ", " + b); }, LinkedHashMap::new ) ); } private Map<EndpointId, AssignedRotation> existingEndpointAssignments(Application application) { final Function<EndpointId, Set<RegionName>> configuredRegionsForEndpoint = endpointId -> { return application.deploymentSpec().endpoints().stream() .filter(endpoint -> endpointId.id().equals(endpoint.endpointId())) .map(Endpoint::regions) .findFirst() .orElse(Set.of()); }; final Function<AssignedRotation, AssignedRotation> assignedRotationWithConfiguredRegions = assignedRotation -> { return new AssignedRotation( assignedRotation.clusterId(), assignedRotation.endpointId(), assignedRotation.rotationId(), configuredRegionsForEndpoint.apply(assignedRotation.endpointId()) ); }; return application.assignedRotations().stream() .collect( Collectors.toMap( AssignedRotation::endpointId, assignedRotationWithConfiguredRegions, (a, b) -> { throw new IllegalStateException("Duplicate entries: " 
+ a + ", " + b); }, LinkedHashMap::new ) ); } /** * Returns all unassigned rotations * @param lock Lock which must be acquired by the caller */ public Map<RotationId, Rotation> availableRotations(@SuppressWarnings("unused") RotationLock lock) { List<RotationId> assignedRotations = applications.asList().stream() .filter(application -> ! application.rotations().isEmpty()) .flatMap(application -> application.rotations().stream()) .collect(Collectors.toList()); Map<RotationId, Rotation> unassignedRotations = new LinkedHashMap<>(this.allRotations); assignedRotations.forEach(unassignedRotations::remove); return Collections.unmodifiableMap(unassignedRotations); } private Rotation findAvailableRotation(Application application, RotationLock lock) { Map<RotationId, Rotation> availableRotations = availableRotations(lock); if (availableRotations.isEmpty()) { throw new IllegalStateException("Unable to assign global rotation to " + application.id() + " - no rotations available"); } RotationId rotation = availableRotations.keySet().iterator().next(); log.info(String.format("Offering %s to application %s", rotation, application.id())); return allRotations.get(rotation); } /** Returns a immutable map of rotation ID to rotation sorted by rotation ID */ private static Map<RotationId, Rotation> from(RotationsConfig rotationConfig) { return rotationConfig.rotations().entrySet().stream() .map(entry -> new Rotation(new RotationId(entry.getKey()), entry.getValue().trim())) .sorted(Comparator.comparing(rotation -> rotation.id().asString())) .collect(collectingAndThen(Collectors.toMap(Rotation::id, rotation -> rotation, (k, v) -> v, LinkedHashMap::new), Collections::unmodifiableMap)); } }
This extra check is not needed unless performance is really critical. A simpler approach would be to only keep the code inside the synchornized block and change `instance` to be a plain reference.
static MessageBusVisitorSessionFactory sharedInstance() { var ref = instance.getAcquire(); if (ref != null) { return ref; } synchronized (initMonitor) { ref = instance.getAcquire(); if (ref != null) { return ref; } ref = new MessageBusVisitorSessionFactory(); instance.setRelease(ref); } return ref; }
var ref = instance.getAcquire();
static MessageBusVisitorSessionFactory sharedInstance() { var ref = instance.getAcquire(); if (ref != null) { return ref; } synchronized (initMonitor) { ref = instance.getAcquire(); if (ref != null) { return ref; } ref = new MessageBusVisitorSessionFactory(); instance.setRelease(ref); } return ref; }
class which is lazily created in a thread-safe * manner the first time this method is invoked. * * May throw any config-related exception if subscription fails. */
class which is lazily created in a thread-safe * manner the first time this method is invoked. * * May throw any config-related exception if subscription fails. */
Consider changing the members of `MessageBusVisitorSessionFactory` to be non-static, and instead enforce the singleton properties by having a single `MessageBusVisitorSessionFactory` instance as a static field in the outer class.
static MessageBusVisitorSessionFactory sharedInstance() { var ref = instance.getAcquire(); if (ref != null) { return ref; } synchronized (initMonitor) { ref = instance.getAcquire(); if (ref != null) { return ref; } ref = new MessageBusVisitorSessionFactory(); instance.setRelease(ref); } return ref; }
var ref = instance.getAcquire();
static MessageBusVisitorSessionFactory sharedInstance() { var ref = instance.getAcquire(); if (ref != null) { return ref; } synchronized (initMonitor) { ref = instance.getAcquire(); if (ref != null) { return ref; } ref = new MessageBusVisitorSessionFactory(); instance.setRelease(ref); } return ref; }
class which is lazily created in a thread-safe * manner the first time this method is invoked. * * May throw any config-related exception if subscription fails. */
class which is lazily created in a thread-safe * manner the first time this method is invoked. * * May throw any config-related exception if subscription fails. */
If `MessageBusVisitorSessionFactory` is only used in context of JDisc, consider having it as a component instead. That is the idiomatic way of doing singletons in JDisc.
static MessageBusVisitorSessionFactory sharedInstance() { var ref = instance.getAcquire(); if (ref != null) { return ref; } synchronized (initMonitor) { ref = instance.getAcquire(); if (ref != null) { return ref; } ref = new MessageBusVisitorSessionFactory(); instance.setRelease(ref); } return ref; }
var ref = instance.getAcquire();
static MessageBusVisitorSessionFactory sharedInstance() { var ref = instance.getAcquire(); if (ref != null) { return ref; } synchronized (initMonitor) { ref = instance.getAcquire(); if (ref != null) { return ref; } ref = new MessageBusVisitorSessionFactory(); instance.setRelease(ref); } return ref; }
class which is lazily created in a thread-safe * manner the first time this method is invoked. * * May throw any config-related exception if subscription fails. */
class which is lazily created in a thread-safe * manner the first time this method is invoked. * * May throw any config-related exception if subscription fails. */
I agree with your points and will consider them for a follow-up. For now I will use this TTAS approach since I try to avoid locks in serving paths if I can do so in a safe and contained manner. I think what we really want is a simple "call once" library abstraction with similar semantics to C++11's function-level `static` or `call_once` functionality.
static MessageBusVisitorSessionFactory sharedInstance() { var ref = instance.getAcquire(); if (ref != null) { return ref; } synchronized (initMonitor) { ref = instance.getAcquire(); if (ref != null) { return ref; } ref = new MessageBusVisitorSessionFactory(); instance.setRelease(ref); } return ref; }
var ref = instance.getAcquire();
static MessageBusVisitorSessionFactory sharedInstance() { var ref = instance.getAcquire(); if (ref != null) { return ref; } synchronized (initMonitor) { ref = instance.getAcquire(); if (ref != null) { return ref; } ref = new MessageBusVisitorSessionFactory(); instance.setRelease(ref); } return ref; }
class which is lazily created in a thread-safe * manner the first time this method is invoked. * * May throw any config-related exception if subscription fails. */
class which is lazily created in a thread-safe * manner the first time this method is invoked. * * May throw any config-related exception if subscription fails. */
Consider including the exception in the log entry (so that the stack trace is printed to the log).
private JsonResponse v1Response(URI requestUri) { try { return new JsonResponse(OK, v1Content(requestUri)); } catch (JSONException e) { log.warning("Bad JSON construction in " + V1_PATH + " response: " + e.getMessage()); return new ErrorResponse(INTERNAL_SERVER_ERROR, "An error occurred, please try path '" + VALUES_PATH + "'"); } }
log.warning("Bad JSON construction in " + V1_PATH + " response: " + e.getMessage());
private JsonResponse v1Response(URI requestUri) { try { return new JsonResponse(OK, v1Content(requestUri)); } catch (JSONException e) { log.warning("Bad JSON construction in " + V1_PATH + " response: " + e.getMessage()); return new ErrorResponse(INTERNAL_SERVER_ERROR, "An error occurred, please try path '" + VALUES_PATH + "'"); } }
class MetricsHandler extends ThreadedHttpRequestHandler { static final String V1_PATH = "/metrics/v1"; static final String VALUES_PATH = V1_PATH + "/values"; private final ValuesFetcher valuesFetcher; @Inject public MetricsHandler(Executor executor, MetricsManager metricsManager, VespaServices vespaServices, MetricsConsumers metricsConsumers) { super(executor); valuesFetcher = new ValuesFetcher(metricsManager, vespaServices, metricsConsumers); } @Override public HttpResponse handle(HttpRequest request) { if (request.getMethod() != GET) return new JsonResponse(METHOD_NOT_ALLOWED, "Only GET is supported"); Path path = new Path(request.getUri()); if (path.matches(V1_PATH)) return v1Response(request.getUri()); if (path.matches(VALUES_PATH)) return valuesResponse(request); return new ErrorResponse(NOT_FOUND, "No content at given path"); } private JsonResponse valuesResponse(HttpRequest request) { try { return new JsonResponse(OK, valuesFetcher.fetch(request.getProperty("consumer"))); } catch (JsonRenderingException e) { return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getMessage()); } } private String v1Content(URI requestUri) throws JSONException { int port = requestUri.getPort(); String host = requestUri.getHost(); StringBuilder base = new StringBuilder("http: base.append(host); if (port >= 0) { base.append(":").append(port); } String uriBase = base.toString(); JSONArray linkList = new JSONArray(); for (String api : new String[] {VALUES_PATH}) { JSONObject resource = new JSONObject(); resource.put("url", uriBase + api); linkList.put(resource); } return new JSONObject().put("resources", linkList).toString(4); } }
class MetricsHandler extends ThreadedHttpRequestHandler { static final String V1_PATH = "/metrics/v1"; static final String VALUES_PATH = V1_PATH + "/values"; private final ValuesFetcher valuesFetcher; @Inject public MetricsHandler(Executor executor, MetricsManager metricsManager, VespaServices vespaServices, MetricsConsumers metricsConsumers) { super(executor); valuesFetcher = new ValuesFetcher(metricsManager, vespaServices, metricsConsumers); } @Override public HttpResponse handle(HttpRequest request) { if (request.getMethod() != GET) return new JsonResponse(METHOD_NOT_ALLOWED, "Only GET is supported"); Path path = new Path(request.getUri()); if (path.matches(V1_PATH)) return v1Response(request.getUri()); if (path.matches(VALUES_PATH)) return valuesResponse(request); return new ErrorResponse(NOT_FOUND, "No content at given path"); } private JsonResponse valuesResponse(HttpRequest request) { try { return new JsonResponse(OK, valuesFetcher.fetch(request.getProperty("consumer"))); } catch (JsonRenderingException e) { return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getMessage()); } } private String v1Content(URI requestUri) throws JSONException { int port = requestUri.getPort(); String host = requestUri.getHost(); StringBuilder base = new StringBuilder("http: base.append(host); if (port >= 0) { base.append(":").append(port); } String uriBase = base.toString(); JSONArray linkList = new JSONArray(); for (String api : new String[] {VALUES_PATH}) { JSONObject resource = new JSONObject(); resource.put("url", uriBase + api); linkList.put(resource); } return new JSONObject().put("resources", linkList).toString(4); } }
Consider reusing the `ObjectMapper` (e.g having it as a static field).
static String asErrorJson(String message) { try { return new ObjectMapper().writeValueAsString(Map.of("error", message)); } catch (JsonProcessingException e) { log.log(WARNING, "Could not encode error message to json:", e); return "Could not encode error message to json, check the log for details."; } }
return new ObjectMapper().writeValueAsString(Map.of("error", message));
static String asErrorJson(String message) { try { return new ObjectMapper().writeValueAsString(Map.of("error", message)); } catch (JsonProcessingException e) { log.log(WARNING, "Could not encode error message to json:", e); return "Could not encode error message to json, check the log for details."; } }
class ErrorResponse extends JsonResponse { private static Logger log = Logger.getLogger(ErrorResponse.class.getName()); ErrorResponse(int code, String message) { super(code, asErrorJson(message)); } }
class ErrorResponse extends JsonResponse { private static Logger log = Logger.getLogger(ErrorResponse.class.getName()); ErrorResponse(int code, String message) { super(code, asErrorJson(message)); } }
Exception message unintentionally missing from log message?
static String asErrorJson(String message) { try { return new ObjectMapper().writeValueAsString(Map.of("error", message)); } catch (JsonProcessingException e) { log.log(WARNING, "Could not encode error message to json:", e); return "Could not encode error message to json, check the log for details."; } }
log.log(WARNING, "Could not encode error message to json:", e);
static String asErrorJson(String message) { try { return new ObjectMapper().writeValueAsString(Map.of("error", message)); } catch (JsonProcessingException e) { log.log(WARNING, "Could not encode error message to json:", e); return "Could not encode error message to json, check the log for details."; } }
class ErrorResponse extends JsonResponse { private static Logger log = Logger.getLogger(ErrorResponse.class.getName()); ErrorResponse(int code, String message) { super(code, asErrorJson(message)); } }
class ErrorResponse extends JsonResponse { private static Logger log = Logger.getLogger(ErrorResponse.class.getName()); ErrorResponse(int code, String message) { super(code, asErrorJson(message)); } }
`map.keySet().removeIf(...)`
public void pruneDiffs(TenantName tenantName, ApplicationName applicationName, long beforeBuildNumber) { Optional.ofNullable(diffs.get(appId(tenantName, applicationName))) .ifPresent(map -> map.keySet().stream() .filter(buildNumber -> buildNumber < beforeBuildNumber) .forEach(map::remove)); }
.ifPresent(map -> map.keySet().stream()
public void pruneDiffs(TenantName tenantName, ApplicationName applicationName, long beforeBuildNumber) { Optional.ofNullable(diffs.get(appId(tenantName, applicationName))) .ifPresent(map -> map.keySet().removeIf(buildNumber -> buildNumber < beforeBuildNumber)); }
class ApplicationStoreMock implements ApplicationStore { private static final byte[] tombstone = new byte[0]; private final Map<ApplicationId, Map<ApplicationVersion, byte[]>> store = new ConcurrentHashMap<>(); private final Map<DeploymentId, byte[]> devStore = new ConcurrentHashMap<>(); private final Map<ApplicationId, Map<Long, String>> diffs = new ConcurrentHashMap<>(); private final Map<DeploymentId, Map<Long, String>> devDiffs = new ConcurrentHashMap<>(); private final Map<ApplicationId, NavigableMap<Instant, byte[]>> meta = new ConcurrentHashMap<>(); private final Map<DeploymentId, NavigableMap<Instant, byte[]>> metaManual = new ConcurrentHashMap<>(); private static ApplicationId appId(TenantName tenant, ApplicationName application) { return ApplicationId.from(tenant, application, InstanceName.defaultName()); } private static ApplicationId testerId(TenantName tenant, ApplicationName application) { return TesterId.of(appId(tenant, application)).id(); } @Override public byte[] get(DeploymentId deploymentId, ApplicationVersion applicationVersion) { if (applicationVersion.isDeployedDirectly()) return requireNonNull(devStore.get(deploymentId)); TenantAndApplicationId tenantAndApplicationId = TenantAndApplicationId.from(deploymentId.applicationId()); byte[] bytes = store.get(appId(tenantAndApplicationId.tenant(), tenantAndApplicationId.application())).get(applicationVersion); if (bytes == null) throw new IllegalArgumentException("No application package found for " + tenantAndApplicationId + " with version " + applicationVersion.id()); return bytes; } @Override public Optional<String> getDiff(TenantName tenantName, ApplicationName applicationName, long buildNumber) { return Optional.ofNullable(diffs.get(appId(tenantName, applicationName))).map(map -> map.get(buildNumber)); } @Override @Override public Optional<byte[]> find(TenantName tenant, ApplicationName application, long buildNumber) { return store.getOrDefault(appId(tenant, application), 
Map.of()).entrySet().stream() .filter(kv -> kv.getKey().buildNumber().orElse(Long.MIN_VALUE) == buildNumber) .map(Map.Entry::getValue) .findFirst(); } @Override public void put(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion, byte[] applicationPackage, String diff) { store.computeIfAbsent(appId(tenant, application), __ -> new ConcurrentHashMap<>()).put(applicationVersion, applicationPackage); applicationVersion.buildNumber().ifPresent(buildNumber -> diffs.computeIfAbsent(appId(tenant, application), __ -> new ConcurrentHashMap<>()).put(buildNumber, diff)); } @Override public boolean prune(TenantName tenant, ApplicationName application, ApplicationVersion oldestToRetain) { return store.containsKey(appId(tenant, application)) && store.get(appId(tenant, application)).keySet().removeIf(version -> version.compareTo(oldestToRetain) < 0); } @Override public void removeAll(TenantName tenant, ApplicationName application) { store.remove(appId(tenant, application)); } @Override public byte[] getTester(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion) { return requireNonNull(store.get(testerId(tenant, application)).get(applicationVersion)); } @Override public void putTester(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion, byte[] testerPackage) { store.computeIfAbsent(testerId(tenant, application), key -> new ConcurrentHashMap<>()) .put(applicationVersion, testerPackage); } @Override public boolean pruneTesters(TenantName tenant, ApplicationName application, ApplicationVersion oldestToRetain) { return store.containsKey(testerId(tenant, application)) && store.get(testerId(tenant, application)).keySet().removeIf(version -> version.compareTo(oldestToRetain) < 0); } @Override public void removeAllTesters(TenantName tenant, ApplicationName application) { store.remove(testerId(tenant, application)); } @Override public Optional<String> getDevDiff(DeploymentId deploymentId, 
long buildNumber) { return Optional.ofNullable(devDiffs.get(deploymentId)).map(map -> map.get(buildNumber)); } @Override public void pruneDevDiffs(DeploymentId deploymentId, long beforeBuildNumber) { Optional.ofNullable(devDiffs.get(deploymentId)) .ifPresent(map -> map.keySet().stream() .filter(buildNumber -> buildNumber < beforeBuildNumber) .forEach(map::remove)); } @Override public void putDev(DeploymentId deploymentId, ApplicationVersion applicationVersion, byte[] applicationPackage, String diff) { devStore.put(deploymentId, applicationPackage); applicationVersion.buildNumber().ifPresent(buildNumber -> devDiffs.computeIfAbsent(deploymentId, __ -> new ConcurrentHashMap<>()).put(buildNumber, diff)); } @Override public void putMeta(TenantName tenant, ApplicationName application, Instant now, byte[] metaZip) { meta.putIfAbsent(appId(tenant, application), new ConcurrentSkipListMap<>()); meta.get(appId(tenant, application)).put(now, metaZip); } @Override public void putMetaTombstone(TenantName tenant, ApplicationName application, Instant now) { putMeta(tenant, application, now, tombstone); } @Override public void putMeta(DeploymentId id, Instant now, byte[] metaZip) { metaManual.computeIfAbsent(id, __ -> new ConcurrentSkipListMap<>()).put(now, metaZip); } @Override public void putMetaTombstone(DeploymentId id, Instant now) { putMeta(id, now, tombstone); } @Override public void pruneMeta(Instant oldest) { for (ApplicationId id : meta.keySet()) { Instant activeAtOldest = meta.get(id).lowerKey(oldest); if (activeAtOldest != null) meta.get(id).headMap(activeAtOldest).clear(); if (meta.get(id).lastKey().isBefore(oldest) && meta.get(id).lastEntry().getValue() == tombstone) meta.remove(id); } } public NavigableMap<Instant, byte[]> getMeta(ApplicationId id) { return meta.get(id); } public NavigableMap<Instant, byte[]> getMeta(DeploymentId id) { return metaManual.get(id); } }
class ApplicationStoreMock implements ApplicationStore { private static final byte[] tombstone = new byte[0]; private final Map<ApplicationId, Map<ApplicationVersion, byte[]>> store = new ConcurrentHashMap<>(); private final Map<DeploymentId, byte[]> devStore = new ConcurrentHashMap<>(); private final Map<ApplicationId, Map<Long, byte[]>> diffs = new ConcurrentHashMap<>(); private final Map<DeploymentId, Map<Long, byte[]>> devDiffs = new ConcurrentHashMap<>(); private final Map<ApplicationId, NavigableMap<Instant, byte[]>> meta = new ConcurrentHashMap<>(); private final Map<DeploymentId, NavigableMap<Instant, byte[]>> metaManual = new ConcurrentHashMap<>(); private static ApplicationId appId(TenantName tenant, ApplicationName application) { return ApplicationId.from(tenant, application, InstanceName.defaultName()); } private static ApplicationId testerId(TenantName tenant, ApplicationName application) { return TesterId.of(appId(tenant, application)).id(); } @Override public byte[] get(DeploymentId deploymentId, ApplicationVersion applicationVersion) { if (applicationVersion.isDeployedDirectly()) return requireNonNull(devStore.get(deploymentId)); TenantAndApplicationId tenantAndApplicationId = TenantAndApplicationId.from(deploymentId.applicationId()); byte[] bytes = store.get(appId(tenantAndApplicationId.tenant(), tenantAndApplicationId.application())).get(applicationVersion); if (bytes == null) throw new IllegalArgumentException("No application package found for " + tenantAndApplicationId + " with version " + applicationVersion.id()); return bytes; } @Override public Optional<byte[]> getDiff(TenantName tenantName, ApplicationName applicationName, long buildNumber) { return Optional.ofNullable(diffs.get(appId(tenantName, applicationName))).map(map -> map.get(buildNumber)); } @Override @Override public Optional<byte[]> find(TenantName tenant, ApplicationName application, long buildNumber) { return store.getOrDefault(appId(tenant, application), 
Map.of()).entrySet().stream() .filter(kv -> kv.getKey().buildNumber().orElse(Long.MIN_VALUE) == buildNumber) .map(Map.Entry::getValue) .findFirst(); } @Override public void put(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion, byte[] applicationPackage, byte[] diff) { store.computeIfAbsent(appId(tenant, application), __ -> new ConcurrentHashMap<>()).put(applicationVersion, applicationPackage); applicationVersion.buildNumber().ifPresent(buildNumber -> diffs.computeIfAbsent(appId(tenant, application), __ -> new ConcurrentHashMap<>()).put(buildNumber, diff)); } @Override public boolean prune(TenantName tenant, ApplicationName application, ApplicationVersion oldestToRetain) { return store.containsKey(appId(tenant, application)) && store.get(appId(tenant, application)).keySet().removeIf(version -> version.compareTo(oldestToRetain) < 0); } @Override public void removeAll(TenantName tenant, ApplicationName application) { store.remove(appId(tenant, application)); } @Override public byte[] getTester(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion) { return requireNonNull(store.get(testerId(tenant, application)).get(applicationVersion)); } @Override public void putTester(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion, byte[] testerPackage) { store.computeIfAbsent(testerId(tenant, application), key -> new ConcurrentHashMap<>()) .put(applicationVersion, testerPackage); } @Override public boolean pruneTesters(TenantName tenant, ApplicationName application, ApplicationVersion oldestToRetain) { return store.containsKey(testerId(tenant, application)) && store.get(testerId(tenant, application)).keySet().removeIf(version -> version.compareTo(oldestToRetain) < 0); } @Override public void removeAllTesters(TenantName tenant, ApplicationName application) { store.remove(testerId(tenant, application)); } @Override public Optional<byte[]> getDevDiff(DeploymentId deploymentId, 
long buildNumber) { return Optional.ofNullable(devDiffs.get(deploymentId)).map(map -> map.get(buildNumber)); } @Override public void pruneDevDiffs(DeploymentId deploymentId, long beforeBuildNumber) { Optional.ofNullable(devDiffs.get(deploymentId)) .ifPresent(map -> map.keySet().removeIf(buildNumber -> buildNumber < beforeBuildNumber)); } @Override public void putDev(DeploymentId deploymentId, ApplicationVersion applicationVersion, byte[] applicationPackage, byte[] diff) { devStore.put(deploymentId, applicationPackage); applicationVersion.buildNumber().ifPresent(buildNumber -> devDiffs.computeIfAbsent(deploymentId, __ -> new ConcurrentHashMap<>()).put(buildNumber, diff)); } @Override public void putMeta(TenantName tenant, ApplicationName application, Instant now, byte[] metaZip) { meta.putIfAbsent(appId(tenant, application), new ConcurrentSkipListMap<>()); meta.get(appId(tenant, application)).put(now, metaZip); } @Override public void putMetaTombstone(TenantName tenant, ApplicationName application, Instant now) { putMeta(tenant, application, now, tombstone); } @Override public void putMeta(DeploymentId id, Instant now, byte[] metaZip) { metaManual.computeIfAbsent(id, __ -> new ConcurrentSkipListMap<>()).put(now, metaZip); } @Override public void putMetaTombstone(DeploymentId id, Instant now) { putMeta(id, now, tombstone); } @Override public void pruneMeta(Instant oldest) { for (ApplicationId id : meta.keySet()) { Instant activeAtOldest = meta.get(id).lowerKey(oldest); if (activeAtOldest != null) meta.get(id).headMap(activeAtOldest).clear(); if (meta.get(id).lastKey().isBefore(oldest) && meta.get(id).lastEntry().getValue() == tombstone) meta.remove(id); } } public NavigableMap<Instant, byte[]> getMeta(ApplicationId id) { return meta.get(id); } public NavigableMap<Instant, byte[]> getMeta(DeploymentId id) { return metaManual.get(id); } }
When the exception is added to the log entry, the message will also be printed with the exception.
static String asErrorJson(String message) { try { return new ObjectMapper().writeValueAsString(Map.of("error", message)); } catch (JsonProcessingException e) { log.log(WARNING, "Could not encode error message to json:", e); return "Could not encode error message to json, check the log for details."; } }
log.log(WARNING, "Could not encode error message to json:", e);
static String asErrorJson(String message) { try { return new ObjectMapper().writeValueAsString(Map.of("error", message)); } catch (JsonProcessingException e) { log.log(WARNING, "Could not encode error message to json:", e); return "Could not encode error message to json, check the log for details."; } }
class ErrorResponse extends JsonResponse { private static Logger log = Logger.getLogger(ErrorResponse.class.getName()); ErrorResponse(int code, String message) { super(code, asErrorJson(message)); } }
class ErrorResponse extends JsonResponse { private static Logger log = Logger.getLogger(ErrorResponse.class.getName()); ErrorResponse(int code, String message) { super(code, asErrorJson(message)); } }
Will fix in a new PR.
private JsonResponse v1Response(URI requestUri) { try { return new JsonResponse(OK, v1Content(requestUri)); } catch (JSONException e) { log.warning("Bad JSON construction in " + V1_PATH + " response: " + e.getMessage()); return new ErrorResponse(INTERNAL_SERVER_ERROR, "An error occurred, please try path '" + VALUES_PATH + "'"); } }
log.warning("Bad JSON construction in " + V1_PATH + " response: " + e.getMessage());
private JsonResponse v1Response(URI requestUri) { try { return new JsonResponse(OK, v1Content(requestUri)); } catch (JSONException e) { log.warning("Bad JSON construction in " + V1_PATH + " response: " + e.getMessage()); return new ErrorResponse(INTERNAL_SERVER_ERROR, "An error occurred, please try path '" + VALUES_PATH + "'"); } }
class MetricsHandler extends ThreadedHttpRequestHandler { static final String V1_PATH = "/metrics/v1"; static final String VALUES_PATH = V1_PATH + "/values"; private final ValuesFetcher valuesFetcher; @Inject public MetricsHandler(Executor executor, MetricsManager metricsManager, VespaServices vespaServices, MetricsConsumers metricsConsumers) { super(executor); valuesFetcher = new ValuesFetcher(metricsManager, vespaServices, metricsConsumers); } @Override public HttpResponse handle(HttpRequest request) { if (request.getMethod() != GET) return new JsonResponse(METHOD_NOT_ALLOWED, "Only GET is supported"); Path path = new Path(request.getUri()); if (path.matches(V1_PATH)) return v1Response(request.getUri()); if (path.matches(VALUES_PATH)) return valuesResponse(request); return new ErrorResponse(NOT_FOUND, "No content at given path"); } private JsonResponse valuesResponse(HttpRequest request) { try { return new JsonResponse(OK, valuesFetcher.fetch(request.getProperty("consumer"))); } catch (JsonRenderingException e) { return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getMessage()); } } private String v1Content(URI requestUri) throws JSONException { int port = requestUri.getPort(); String host = requestUri.getHost(); StringBuilder base = new StringBuilder("http: base.append(host); if (port >= 0) { base.append(":").append(port); } String uriBase = base.toString(); JSONArray linkList = new JSONArray(); for (String api : new String[] {VALUES_PATH}) { JSONObject resource = new JSONObject(); resource.put("url", uriBase + api); linkList.put(resource); } return new JSONObject().put("resources", linkList).toString(4); } }
class MetricsHandler extends ThreadedHttpRequestHandler { static final String V1_PATH = "/metrics/v1"; static final String VALUES_PATH = V1_PATH + "/values"; private final ValuesFetcher valuesFetcher; @Inject public MetricsHandler(Executor executor, MetricsManager metricsManager, VespaServices vespaServices, MetricsConsumers metricsConsumers) { super(executor); valuesFetcher = new ValuesFetcher(metricsManager, vespaServices, metricsConsumers); } @Override public HttpResponse handle(HttpRequest request) { if (request.getMethod() != GET) return new JsonResponse(METHOD_NOT_ALLOWED, "Only GET is supported"); Path path = new Path(request.getUri()); if (path.matches(V1_PATH)) return v1Response(request.getUri()); if (path.matches(VALUES_PATH)) return valuesResponse(request); return new ErrorResponse(NOT_FOUND, "No content at given path"); } private JsonResponse valuesResponse(HttpRequest request) { try { return new JsonResponse(OK, valuesFetcher.fetch(request.getProperty("consumer"))); } catch (JsonRenderingException e) { return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getMessage()); } } private String v1Content(URI requestUri) throws JSONException { int port = requestUri.getPort(); String host = requestUri.getHost(); StringBuilder base = new StringBuilder("http: base.append(host); if (port >= 0) { base.append(":").append(port); } String uriBase = base.toString(); JSONArray linkList = new JSONArray(); for (String api : new String[] {VALUES_PATH}) { JSONObject resource = new JSONObject(); resource.put("url", uriBase + api); linkList.put(resource); } return new JSONObject().put("resources", linkList).toString(4); } }
Great, they are thread safe.
// Serializes the given message as the JSON object {"error": message}.
// If Jackson encoding fails, the failure is logged and a plain-text
// fallback string is returned instead.
static String asErrorJson(String message) {
    ObjectMapper mapper = new ObjectMapper();
    try {
        return mapper.writeValueAsString(Map.of("error", message));
    } catch (JsonProcessingException e) {
        log.log(WARNING, "Could not encode error message to json:", e);
        return "Could not encode error message to json, check the log for details.";
    }
}
return new ObjectMapper().writeValueAsString(Map.of("error", message));
// Renders {"error": message} as a JSON string. A Jackson encoding failure
// is logged and replaced by a fixed plain-text message.
static String asErrorJson(String message) {
    try {
        Map<String, String> body = Map.of("error", message);
        return new ObjectMapper().writeValueAsString(body);
    } catch (JsonProcessingException e) {
        log.log(WARNING, "Could not encode error message to json:", e);
        return "Could not encode error message to json, check the log for details.";
    }
}
/**
 * A JsonResponse whose body is the JSON object {"error": message},
 * produced by asErrorJson.
 */
class ErrorResponse extends JsonResponse {

    // Declared final: the logger is assigned exactly once and never reassigned.
    private static final Logger log = Logger.getLogger(ErrorResponse.class.getName());

    ErrorResponse(int code, String message) {
        super(code, asErrorJson(message));
    }

}
/**
 * A JsonResponse whose body is the JSON object {"error": message},
 * produced by asErrorJson.
 */
class ErrorResponse extends JsonResponse {

    // Declared final: the logger is assigned exactly once and never reassigned.
    private static final Logger log = Logger.getLogger(ErrorResponse.class.getName());

    ErrorResponse(int code, String message) {
        super(code, asErrorJson(message));
    }

}
Agree, fixed.
// Validates the node/state pair, grows the tracked index range so it
// covers this node, and records the state.
void setNodeState(Node node, NodeState ns) {
    validateInput(node, ns);
    int index = node.getIndex();
    maxIndex = Math.max(maxIndex, index + 1);
    setNodeStateInternal(index, ns);
}
maxIndex = index + 1;
// Validates the node/state pair, grows the logical node count so it
// covers this node, and records the state.
void setNodeState(Node node, NodeState ns) {
    validateInput(node, ns);
    int index = node.getIndex();
    logicalNodeCount = Math.max(logicalNodeCount, index + 1);
    setNodeStateInternal(index, ns);
}
// Per-node state bookkeeping for a single NodeType: a BitSet marks nodes
// considered "up" below maxIndex, and nodeStates holds explicit overrides
// only for nodes whose state differs from the default up/down state.
// setNodeStateInternal's ordering (remove, then conditionally re-put) is
// load-bearing; code kept byte-identical, comments only.
class Nodes { private int maxIndex; private final NodeType type; private final BitSet upNodes; private final Map<Integer, NodeState> nodeStates = new HashMap<>(); Nodes(NodeType type) { this.type = type; upNodes = new BitSet(); } Nodes(Nodes b) { maxIndex = b.maxIndex; type = b.type; upNodes = (BitSet) b.upNodes.clone(); b.nodeStates.forEach((key, value) -> nodeStates.put(key, value.clone())); } void updateMaxIndex(int index) { if (index > maxIndex) { upNodes.set(maxIndex, index); maxIndex = index; } } int getMaxIndex() { return maxIndex; } NodeState getNodeState(int index) { NodeState ns = nodeStates.get(index); if (ns != null) return ns; return (index >= getMaxIndex() || ! upNodes.get(index)) ? defaultDown() : defaultUp(); } private void validateInput(Node node, NodeState ns) { ns.verifyValidInSystemState(node.getType()); if (node.getType() != type) { throw new IllegalArgumentException("NodeType '" + node.getType() + "' differs from '" + type + "'"); } } void addNodeState(Node node, NodeState ns) { validateInput(node, ns); int index = node.getIndex(); updateMaxIndex(index + 1); setNodeStateInternal(index, ns); } private static boolean equalsWithDescription(NodeState a, NodeState b) { return a.equals(b) && ((a.getState() != State.DOWN) || a.getDescription().equals(b.getDescription())); } private void setNodeStateInternal(int index, NodeState ns) { nodeStates.remove(index); if (ns.getState() == State.DOWN) { upNodes.clear(index); if ( ! equalsWithDescription(defaultDown(), ns)) { nodeStates.put(index, ns); } } else { upNodes.set(index); if ( ! equalsWithDescription(defaultUp(), ns)) { nodeStates.put(index, ns); } } } boolean similarToImpl(Nodes other, final NodeStateCmp nodeStateCmp) { if (maxIndex != other.maxIndex) return false; if (type != other.type) return false; if ( ! 
upNodes.equals(other.upNodes)) return false; for (Integer node : unionNodeSetWith(other.nodeStates.keySet())) { final NodeState lhs = nodeStates.get(node); final NodeState rhs = other.nodeStates.get(node); if (!nodeStateCmp.similar(type, lhs, rhs)) { return false; } } return true; } private Set<Integer> unionNodeSetWith(final Set<Integer> otherNodes) { final Set<Integer> unionNodeSet = new HashSet<>(nodeStates.keySet()); unionNodeSet.addAll(otherNodes); return unionNodeSet; } @Override public String toString() { return toString(false); } String toString(boolean verbose) { StringBuilder sb = new StringBuilder(); int nodeCount = verbose ? getMaxIndex() : upNodes.length(); if ( nodeCount > 0 ) { sb.append(type == NodeType.DISTRIBUTOR ? " distributor:" : " storage:").append(nodeCount); for (int i = 0; i < nodeCount; i++) { String nodeState = getNodeState(i).serialize(i, verbose); if (!nodeState.isEmpty()) { sb.append(' ').append(nodeState); } } } return sb.toString(); } @Override public boolean equals(Object obj) { if (! (obj instanceof Nodes)) return false; Nodes b = (Nodes) obj; if (maxIndex != b.maxIndex) return false; if (type != b.type) return false; if (!upNodes.equals(b.upNodes)) return false; if (!nodeStates.equals(b.nodeStates)) return false; return true; } @Override public int hashCode() { return Objects.hash(maxIndex, type, nodeStates, upNodes); } private NodeState defaultDown() { return type == NodeType.STORAGE ? DEFAULT_STORAGE_DOWN_NODE_STATE : DEFAULT_DISTRIBUTOR_DOWN_NODE_STATE; } private NodeState defaultUp() { return defaultUpNodeState(type); } }
// Per-node state bookkeeping for a single NodeType (renamed variant:
// logicalNodeCount instead of maxIndex). upNodes marks nodes considered
// "up" below the logical count; nodeStates holds explicit overrides only
// for states differing from the constructed default up/down NodeState.
// Code kept byte-identical, comments only.
class Nodes { private int logicalNodeCount; private final NodeType type; private final BitSet upNodes; private final Map<Integer, NodeState> nodeStates = new HashMap<>(); Nodes(NodeType type) { this.type = type; upNodes = new BitSet(); } Nodes(Nodes b) { logicalNodeCount = b.logicalNodeCount; type = b.type; upNodes = (BitSet) b.upNodes.clone(); b.nodeStates.forEach((key, value) -> nodeStates.put(key, value.clone())); } void updateMaxIndex(int index) { if (index > logicalNodeCount) { upNodes.set(logicalNodeCount, index); logicalNodeCount = index; } } int getLogicalNodeCount() { return logicalNodeCount; } NodeState getNodeState(int index) { NodeState ns = nodeStates.get(index); if (ns != null) return ns; return (index >= getLogicalNodeCount() || ! upNodes.get(index)) ? new NodeState(type, State.DOWN) : new NodeState(type, State.UP); } private void validateInput(Node node, NodeState ns) { ns.verifyValidInSystemState(node.getType()); if (node.getType() != type) { throw new IllegalArgumentException("NodeType '" + node.getType() + "' differs from '" + type + "'"); } } void addNodeState(Node node, NodeState ns) { validateInput(node, ns); int index = node.getIndex(); updateMaxIndex(index + 1); setNodeStateInternal(index, ns); } private static boolean equalsWithDescription(NodeState a, NodeState b) { return a.equals(b) && ((a.getState() != State.DOWN) || a.getDescription().equals(b.getDescription())); } private void setNodeStateInternal(int index, NodeState ns) { nodeStates.remove(index); if (ns.getState() == State.DOWN) { upNodes.clear(index); if ( ! equalsWithDescription(defaultDown(), ns)) { nodeStates.put(index, ns); } } else { upNodes.set(index); if ( ! equalsWithDescription(defaultUp(), ns)) { nodeStates.put(index, ns); } } } boolean similarToImpl(Nodes other, final NodeStateCmp nodeStateCmp) { if (logicalNodeCount != other.logicalNodeCount) return false; if (type != other.type) return false; if ( ! 
upNodes.equals(other.upNodes)) return false; for (Integer node : unionNodeSetWith(other.nodeStates.keySet())) { final NodeState lhs = nodeStates.get(node); final NodeState rhs = other.nodeStates.get(node); if (!nodeStateCmp.similar(type, lhs, rhs)) { return false; } } return true; } private Set<Integer> unionNodeSetWith(final Set<Integer> otherNodes) { final Set<Integer> unionNodeSet = new HashSet<>(nodeStates.keySet()); unionNodeSet.addAll(otherNodes); return unionNodeSet; } @Override public String toString() { return toString(false); } String toString(boolean verbose) { StringBuilder sb = new StringBuilder(); int nodeCount = verbose ? getLogicalNodeCount() : upNodes.length(); if ( nodeCount > 0 ) { sb.append(type == NodeType.DISTRIBUTOR ? " distributor:" : " storage:").append(nodeCount); for (int i = 0; i < nodeCount; i++) { String nodeState = getNodeState(i).serialize(i, verbose); if (!nodeState.isEmpty()) { sb.append(' ').append(nodeState); } } } return sb.toString(); } @Override public boolean equals(Object obj) { if (! (obj instanceof Nodes)) return false; Nodes b = (Nodes) obj; if (logicalNodeCount != b.logicalNodeCount) return false; if (type != b.type) return false; if (!upNodes.equals(b.upNodes)) return false; if (!nodeStates.equals(b.nodeStates)) return false; return true; } @Override public int hashCode() { return Objects.hash(logicalNodeCount, type, nodeStates, upNodes); } private NodeState defaultDown() { return type == NodeType.STORAGE ? DEFAULT_STORAGE_DOWN_NODE_STATE : DEFAULT_DISTRIBUTOR_DOWN_NODE_STATE; } private NodeState defaultUp() { return defaultUpNodeState(type); } }
You need to make sure this method never throws any exception, or it will stop the `ScheduledExecutorService` from running it again.
// Periodic maintenance entry point, invoked by a ScheduledExecutorService.
// It must never propagate an exception: a scheduled task that throws is
// suppressed from further execution, so every failure is caught and logged.
public void run() {
    try {
        deleteUnusedFiles(fileReferencesDownloadDir);
        deleteUnusedFiles(urlDownloadDir);
    } catch (Throwable t) {
        log.log(java.util.logging.Level.WARNING, "Deleting unused files failed. ", t);
    }
}
deleteUnusedFiles(fileReferencesDownloadDir);
// Scheduled maintenance entry point. A task that throws is dropped from
// the ScheduledExecutorService's schedule, so all failures are caught
// and logged rather than propagated.
public void run() {
    try {
        for (File dir : new File[] { fileReferencesDownloadDir, urlDownloadDir }) {
            deleteUnusedFiles(dir);
        }
    } catch (Throwable t) {
        log.log(Level.WARNING, "Deleting unused files failed. ", t);
    }
}
// Runnable that deletes entries under the file-reference and url download
// directories whose last-modified time is older than durationToKeepFiles.
// NOTE(review): the @Override below sits directly before a private method;
// the run() body appears elided from this excerpt — verify against the full
// source. Code kept byte-identical, comments only.
class CachedFilesMaintainer implements Runnable { private final static Logger log = Logger.getLogger(CachedFilesMaintainer.class.getName()); private static final File defaultUrlDownloadDir = UrlDownloadRpcServer.downloadDir; private static final File defaultFileReferencesDownloadDir = FileDownloader.defaultDownloadDirectory; private static final Duration defaultDurationToKeepFiles = Duration.ofDays(30); private final File urlDownloadDir; private final File fileReferencesDownloadDir; private final Duration durationToKeepFiles; CachedFilesMaintainer() { this(defaultFileReferencesDownloadDir, defaultUrlDownloadDir, defaultDurationToKeepFiles); } CachedFilesMaintainer(File fileReferencesDownloadDir, File urlDownloadDir, Duration durationToKeepFiles) { this.fileReferencesDownloadDir = fileReferencesDownloadDir; this.urlDownloadDir = urlDownloadDir; this.durationToKeepFiles = durationToKeepFiles; } @Override private void deleteUnusedFiles(File directory) { Instant deleteNotUsedSinceInstant = Instant.now().minus(durationToKeepFiles); Set<String> filesOnDisk = new HashSet<>(); File[] files = directory.listFiles(); if (files != null) filesOnDisk.addAll(Arrays.stream(files).map(File::getName).collect(Collectors.toSet())); log.log(LogLevel.DEBUG, "Files on disk (in " + directory + "): " + filesOnDisk); Set<String> filesToDelete = filesOnDisk .stream() .filter(fileReference -> isFileLastModifiedBefore(new File(directory, fileReference), deleteNotUsedSinceInstant)) .collect(Collectors.toSet()); if (filesToDelete.size() > 0) { log.log(LogLevel.INFO, "Files that can be deleted in " + directory + " (not used since " + deleteNotUsedSinceInstant + "): " + filesToDelete); filesToDelete.forEach(fileReference -> { File file = new File(directory, fileReference); if (!IOUtils.recursiveDeleteDir(file)) log.log(LogLevel.WARNING, "Could not delete " + file.getAbsolutePath()); }); } } private boolean isFileLastModifiedBefore(File fileReference, Instant instant) { BasicFileAttributes 
fileAttributes; try { fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class); return fileAttributes.lastModifiedTime().toInstant().isBefore(instant); } catch (IOException e) { throw new UncheckedIOException(e); } } }
// Runnable that deletes entries under the file-reference and url download
// directories whose last-modified time is older than durationToKeepFiles.
// NOTE(review): the @Override below sits directly before a private method;
// the run() body appears elided from this excerpt — verify against the full
// source. Code kept byte-identical, comments only.
class CachedFilesMaintainer implements Runnable { private final static Logger log = Logger.getLogger(CachedFilesMaintainer.class.getName()); private static final File defaultUrlDownloadDir = UrlDownloadRpcServer.downloadDir; private static final File defaultFileReferencesDownloadDir = FileDownloader.defaultDownloadDirectory; private static final Duration defaultDurationToKeepFiles = Duration.ofDays(30); private final File urlDownloadDir; private final File fileReferencesDownloadDir; private final Duration durationToKeepFiles; CachedFilesMaintainer() { this(defaultFileReferencesDownloadDir, defaultUrlDownloadDir, defaultDurationToKeepFiles); } CachedFilesMaintainer(File fileReferencesDownloadDir, File urlDownloadDir, Duration durationToKeepFiles) { this.fileReferencesDownloadDir = fileReferencesDownloadDir; this.urlDownloadDir = urlDownloadDir; this.durationToKeepFiles = durationToKeepFiles; } @Override private void deleteUnusedFiles(File directory) { Instant deleteNotUsedSinceInstant = Instant.now().minus(durationToKeepFiles); Set<String> filesOnDisk = new HashSet<>(); File[] files = directory.listFiles(); if (files != null) filesOnDisk.addAll(Arrays.stream(files).map(File::getName).collect(Collectors.toSet())); log.log(LogLevel.DEBUG, "Files on disk (in " + directory + "): " + filesOnDisk); Set<String> filesToDelete = filesOnDisk .stream() .filter(fileReference -> isFileLastModifiedBefore(new File(directory, fileReference), deleteNotUsedSinceInstant)) .collect(Collectors.toSet()); if (filesToDelete.size() > 0) { log.log(LogLevel.INFO, "Files that can be deleted in " + directory + " (not used since " + deleteNotUsedSinceInstant + "): " + filesToDelete); filesToDelete.forEach(fileReference -> { File file = new File(directory, fileReference); if (!IOUtils.recursiveDeleteDir(file)) log.log(LogLevel.WARNING, "Could not delete " + file.getAbsolutePath()); }); } } private boolean isFileLastModifiedBefore(File fileReference, Instant instant) { BasicFileAttributes 
fileAttributes; try { fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class); return fileAttributes.lastModifiedTime().toInstant().isBefore(instant); } catch (IOException e) { throw new UncheckedIOException(e); } } }
Good point, fixed
// Periodic maintenance entry point, invoked by a ScheduledExecutorService.
// It must never propagate an exception: a scheduled task that throws is
// suppressed from further execution, so every failure is caught and logged.
public void run() {
    try {
        deleteUnusedFiles(fileReferencesDownloadDir);
        deleteUnusedFiles(urlDownloadDir);
    } catch (Throwable t) {
        log.log(java.util.logging.Level.WARNING, "Deleting unused files failed. ", t);
    }
}
deleteUnusedFiles(fileReferencesDownloadDir);
// Scheduled maintenance entry point. A task that throws is dropped from
// the ScheduledExecutorService's schedule, so all failures are caught
// and logged rather than propagated.
public void run() {
    try {
        for (File dir : new File[] { fileReferencesDownloadDir, urlDownloadDir }) {
            deleteUnusedFiles(dir);
        }
    } catch (Throwable t) {
        log.log(Level.WARNING, "Deleting unused files failed. ", t);
    }
}
// Runnable that deletes entries under the file-reference and url download
// directories whose last-modified time is older than durationToKeepFiles.
// NOTE(review): the @Override below sits directly before a private method;
// the run() body appears elided from this excerpt — verify against the full
// source. Code kept byte-identical, comments only.
class CachedFilesMaintainer implements Runnable { private final static Logger log = Logger.getLogger(CachedFilesMaintainer.class.getName()); private static final File defaultUrlDownloadDir = UrlDownloadRpcServer.downloadDir; private static final File defaultFileReferencesDownloadDir = FileDownloader.defaultDownloadDirectory; private static final Duration defaultDurationToKeepFiles = Duration.ofDays(30); private final File urlDownloadDir; private final File fileReferencesDownloadDir; private final Duration durationToKeepFiles; CachedFilesMaintainer() { this(defaultFileReferencesDownloadDir, defaultUrlDownloadDir, defaultDurationToKeepFiles); } CachedFilesMaintainer(File fileReferencesDownloadDir, File urlDownloadDir, Duration durationToKeepFiles) { this.fileReferencesDownloadDir = fileReferencesDownloadDir; this.urlDownloadDir = urlDownloadDir; this.durationToKeepFiles = durationToKeepFiles; } @Override private void deleteUnusedFiles(File directory) { Instant deleteNotUsedSinceInstant = Instant.now().minus(durationToKeepFiles); Set<String> filesOnDisk = new HashSet<>(); File[] files = directory.listFiles(); if (files != null) filesOnDisk.addAll(Arrays.stream(files).map(File::getName).collect(Collectors.toSet())); log.log(LogLevel.DEBUG, "Files on disk (in " + directory + "): " + filesOnDisk); Set<String> filesToDelete = filesOnDisk .stream() .filter(fileReference -> isFileLastModifiedBefore(new File(directory, fileReference), deleteNotUsedSinceInstant)) .collect(Collectors.toSet()); if (filesToDelete.size() > 0) { log.log(LogLevel.INFO, "Files that can be deleted in " + directory + " (not used since " + deleteNotUsedSinceInstant + "): " + filesToDelete); filesToDelete.forEach(fileReference -> { File file = new File(directory, fileReference); if (!IOUtils.recursiveDeleteDir(file)) log.log(LogLevel.WARNING, "Could not delete " + file.getAbsolutePath()); }); } } private boolean isFileLastModifiedBefore(File fileReference, Instant instant) { BasicFileAttributes 
fileAttributes; try { fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class); return fileAttributes.lastModifiedTime().toInstant().isBefore(instant); } catch (IOException e) { throw new UncheckedIOException(e); } } }
// Runnable that deletes entries under the file-reference and url download
// directories whose last-modified time is older than durationToKeepFiles.
// NOTE(review): the @Override below sits directly before a private method;
// the run() body appears elided from this excerpt — verify against the full
// source. Code kept byte-identical, comments only.
class CachedFilesMaintainer implements Runnable { private final static Logger log = Logger.getLogger(CachedFilesMaintainer.class.getName()); private static final File defaultUrlDownloadDir = UrlDownloadRpcServer.downloadDir; private static final File defaultFileReferencesDownloadDir = FileDownloader.defaultDownloadDirectory; private static final Duration defaultDurationToKeepFiles = Duration.ofDays(30); private final File urlDownloadDir; private final File fileReferencesDownloadDir; private final Duration durationToKeepFiles; CachedFilesMaintainer() { this(defaultFileReferencesDownloadDir, defaultUrlDownloadDir, defaultDurationToKeepFiles); } CachedFilesMaintainer(File fileReferencesDownloadDir, File urlDownloadDir, Duration durationToKeepFiles) { this.fileReferencesDownloadDir = fileReferencesDownloadDir; this.urlDownloadDir = urlDownloadDir; this.durationToKeepFiles = durationToKeepFiles; } @Override private void deleteUnusedFiles(File directory) { Instant deleteNotUsedSinceInstant = Instant.now().minus(durationToKeepFiles); Set<String> filesOnDisk = new HashSet<>(); File[] files = directory.listFiles(); if (files != null) filesOnDisk.addAll(Arrays.stream(files).map(File::getName).collect(Collectors.toSet())); log.log(LogLevel.DEBUG, "Files on disk (in " + directory + "): " + filesOnDisk); Set<String> filesToDelete = filesOnDisk .stream() .filter(fileReference -> isFileLastModifiedBefore(new File(directory, fileReference), deleteNotUsedSinceInstant)) .collect(Collectors.toSet()); if (filesToDelete.size() > 0) { log.log(LogLevel.INFO, "Files that can be deleted in " + directory + " (not used since " + deleteNotUsedSinceInstant + "): " + filesToDelete); filesToDelete.forEach(fileReference -> { File file = new File(directory, fileReference); if (!IOUtils.recursiveDeleteDir(file)) log.log(LogLevel.WARNING, "Could not delete " + file.getAbsolutePath()); }); } } private boolean isFileLastModifiedBefore(File fileReference, Instant instant) { BasicFileAttributes 
fileAttributes; try { fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class); return fileAttributes.lastModifiedTime().toInstant().isBefore(instant); } catch (IOException e) { throw new UncheckedIOException(e); } } }
Consider using a named constant for the 1GB reserved, rather than '1'.
// Scales the default write-filter memory limit down to account for memory
// reserved for processes other than proton on the node.
private void tuneWriteFilter(ProtonConfig.Writefilter.Builder builder) {
    double reservedMemoryGb = 1; // named constant instead of a bare '1' magic number
    double defaultMemoryLimit = new ProtonConfig.Writefilter(new ProtonConfig.Writefilter.Builder()).memorylimit();
    double scaledMemoryLimit = ((nodeFlavor.getMinMainMemoryAvailableGb() - reservedMemoryGb) * defaultMemoryLimit) / nodeFlavor.getMinMainMemoryAvailableGb();
    builder.memorylimit(scaledMemoryLimit);
}
double scaledMemoryLimit = ((nodeFlavor.getMinMainMemoryAvailableGb() - 1) * defaultMemoryLimit) / nodeFlavor.getMinMainMemoryAvailableGb();
// Lowers the default write-filter memory limit in proportion to the share
// of node memory left after reserving some for other processes.
private void tuneWriteFilter(ProtonConfig.Writefilter.Builder builder) {
    double reservedMemoryGb = 1;
    double availableMemoryGb = nodeFlavor.getMinMainMemoryAvailableGb();
    double defaultMemoryLimit = new ProtonConfig.Writefilter(new ProtonConfig.Writefilter.Builder()).memorylimit();
    builder.memorylimit(((availableMemoryGb - reservedMemoryGb) * defaultMemoryLimit) / availableMemoryGb);
}
// Derives proton config tuning (hw info, disk write speed, document-store
// file size, flush/TLS limits, summary cache and read I/O, write filter)
// from the node flavor's hardware characteristics. The exact threshold
// constants are load-bearing; code kept byte-identical, comments only.
class NodeFlavorTuning implements ProtonConfig.Producer { static long MB = 1024 * 1024; static long GB = MB * 1024; private final Flavor nodeFlavor; public NodeFlavorTuning(Flavor nodeFlavor) { this.nodeFlavor = nodeFlavor; } @Override public void getConfig(ProtonConfig.Builder builder) { setHwInfo(builder); tuneDiskWriteSpeed(builder); tuneDocumentStoreMaxFileSize(builder.summary.log); tuneFlushStrategyMemoryLimits(builder.flush.memory); tuneFlushStrategyTlsSize(builder.flush.memory); tuneSummaryReadIo(builder.summary.read); tuneSummaryCache(builder.summary.cache); tuneSearchReadIo(builder.search.mmap); tuneWriteFilter(builder.writefilter); for (ProtonConfig.Documentdb.Builder dbb : builder.documentdb) { getConfig(dbb); } } private void getConfig(ProtonConfig.Documentdb.Builder builder) { ProtonConfig.Documentdb dbCfg = builder.build(); if (dbCfg.mode() != ProtonConfig.Documentdb.Mode.Enum.INDEX) { long numDocs = (long)nodeFlavor.getMinMainMemoryAvailableGb()*GB/64L; builder.allocation.initialnumdocs(numDocs); } } private void tuneSummaryCache(ProtonConfig.Summary.Cache.Builder builder) { long memoryLimitBytes = (long) ((nodeFlavor.getMinMainMemoryAvailableGb() * 0.05) * GB); builder.maxbytes(memoryLimitBytes); } private void setHwInfo(ProtonConfig.Builder builder) { builder.hwinfo.disk.size((long)nodeFlavor.getMinDiskAvailableGb() * GB); builder.hwinfo.disk.shared(nodeFlavor.getType().equals(Flavor.Type.DOCKER_CONTAINER)); builder.hwinfo.memory.size((long)nodeFlavor.getMinMainMemoryAvailableGb() * GB); builder.hwinfo.cpu.cores((int)nodeFlavor.getMinCpuCores()); } private void tuneDiskWriteSpeed(ProtonConfig.Builder builder) { if (!nodeFlavor.hasFastDisk()) { builder.hwinfo.disk.writespeed(40); } } private void tuneDocumentStoreMaxFileSize(ProtonConfig.Summary.Log.Builder builder) { double memoryGb = nodeFlavor.getMinMainMemoryAvailableGb(); long fileSizeBytes = 4 * GB; if (memoryGb <= 12.0) { fileSizeBytes = 256 * MB; } else if (memoryGb < 24.0) { fileSizeBytes = 
512 * MB; } else if (memoryGb <= 64.0) { fileSizeBytes = 1 * GB; } builder.maxfilesize(fileSizeBytes); } private void tuneFlushStrategyMemoryLimits(ProtonConfig.Flush.Memory.Builder builder) { long memoryLimitBytes = (long) ((nodeFlavor.getMinMainMemoryAvailableGb() / 8) * GB); builder.maxmemory(memoryLimitBytes); builder.each.maxmemory(memoryLimitBytes); } private void tuneFlushStrategyTlsSize(ProtonConfig.Flush.Memory.Builder builder) { long tlsSizeBytes = (long) ((nodeFlavor.getMinDiskAvailableGb() * 0.07) * GB); tlsSizeBytes = min(tlsSizeBytes, 100 * GB); builder.maxtlssize(tlsSizeBytes); } private void tuneSummaryReadIo(ProtonConfig.Summary.Read.Builder builder) { if (nodeFlavor.hasFastDisk()) { builder.io(ProtonConfig.Summary.Read.Io.DIRECTIO); } } private void tuneSearchReadIo(ProtonConfig.Search.Mmap.Builder builder) { if (nodeFlavor.hasFastDisk()) { builder.advise(ProtonConfig.Search.Mmap.Advise.RANDOM); } } }
// Derives proton config tuning (hw info, disk write speed, document-store
// file size, flush/TLS limits, summary cache and read I/O, write filter)
// from the node flavor's hardware characteristics. The exact threshold
// constants are load-bearing; code kept byte-identical, comments only.
class NodeFlavorTuning implements ProtonConfig.Producer { static long MB = 1024 * 1024; static long GB = MB * 1024; private final Flavor nodeFlavor; public NodeFlavorTuning(Flavor nodeFlavor) { this.nodeFlavor = nodeFlavor; } @Override public void getConfig(ProtonConfig.Builder builder) { setHwInfo(builder); tuneDiskWriteSpeed(builder); tuneDocumentStoreMaxFileSize(builder.summary.log); tuneFlushStrategyMemoryLimits(builder.flush.memory); tuneFlushStrategyTlsSize(builder.flush.memory); tuneSummaryReadIo(builder.summary.read); tuneSummaryCache(builder.summary.cache); tuneSearchReadIo(builder.search.mmap); tuneWriteFilter(builder.writefilter); for (ProtonConfig.Documentdb.Builder dbb : builder.documentdb) { getConfig(dbb); } } private void getConfig(ProtonConfig.Documentdb.Builder builder) { ProtonConfig.Documentdb dbCfg = builder.build(); if (dbCfg.mode() != ProtonConfig.Documentdb.Mode.Enum.INDEX) { long numDocs = (long)nodeFlavor.getMinMainMemoryAvailableGb()*GB/64L; builder.allocation.initialnumdocs(numDocs); } } private void tuneSummaryCache(ProtonConfig.Summary.Cache.Builder builder) { long memoryLimitBytes = (long) ((nodeFlavor.getMinMainMemoryAvailableGb() * 0.05) * GB); builder.maxbytes(memoryLimitBytes); } private void setHwInfo(ProtonConfig.Builder builder) { builder.hwinfo.disk.size((long)nodeFlavor.getMinDiskAvailableGb() * GB); builder.hwinfo.disk.shared(nodeFlavor.getType().equals(Flavor.Type.DOCKER_CONTAINER)); builder.hwinfo.memory.size((long)nodeFlavor.getMinMainMemoryAvailableGb() * GB); builder.hwinfo.cpu.cores((int)nodeFlavor.getMinCpuCores()); } private void tuneDiskWriteSpeed(ProtonConfig.Builder builder) { if (!nodeFlavor.hasFastDisk()) { builder.hwinfo.disk.writespeed(40); } } private void tuneDocumentStoreMaxFileSize(ProtonConfig.Summary.Log.Builder builder) { double memoryGb = nodeFlavor.getMinMainMemoryAvailableGb(); long fileSizeBytes = 4 * GB; if (memoryGb <= 12.0) { fileSizeBytes = 256 * MB; } else if (memoryGb < 24.0) { fileSizeBytes = 
512 * MB; } else if (memoryGb <= 64.0) { fileSizeBytes = 1 * GB; } builder.maxfilesize(fileSizeBytes); } private void tuneFlushStrategyMemoryLimits(ProtonConfig.Flush.Memory.Builder builder) { long memoryLimitBytes = (long) ((nodeFlavor.getMinMainMemoryAvailableGb() / 8) * GB); builder.maxmemory(memoryLimitBytes); builder.each.maxmemory(memoryLimitBytes); } private void tuneFlushStrategyTlsSize(ProtonConfig.Flush.Memory.Builder builder) { long tlsSizeBytes = (long) ((nodeFlavor.getMinDiskAvailableGb() * 0.07) * GB); tlsSizeBytes = min(tlsSizeBytes, 100 * GB); builder.maxtlssize(tlsSizeBytes); } private void tuneSummaryReadIo(ProtonConfig.Summary.Read.Builder builder) { if (nodeFlavor.hasFastDisk()) { builder.io(ProtonConfig.Summary.Read.Io.DIRECTIO); } } private void tuneSearchReadIo(ProtonConfig.Search.Mmap.Builder builder) { if (nodeFlavor.hasFastDisk()) { builder.advise(ProtonConfig.Search.Mmap.Advise.RANDOM); } } }
Fixed
// Scales the default write-filter memory limit down to account for memory
// reserved for processes other than proton on the node.
private void tuneWriteFilter(ProtonConfig.Writefilter.Builder builder) {
    double reservedMemoryGb = 1; // named constant instead of a bare '1' magic number
    double defaultMemoryLimit = new ProtonConfig.Writefilter(new ProtonConfig.Writefilter.Builder()).memorylimit();
    double scaledMemoryLimit = ((nodeFlavor.getMinMainMemoryAvailableGb() - reservedMemoryGb) * defaultMemoryLimit) / nodeFlavor.getMinMainMemoryAvailableGb();
    builder.memorylimit(scaledMemoryLimit);
}
double scaledMemoryLimit = ((nodeFlavor.getMinMainMemoryAvailableGb() - 1) * defaultMemoryLimit) / nodeFlavor.getMinMainMemoryAvailableGb();
// Lowers the default write-filter memory limit in proportion to the share
// of node memory left after reserving some for other processes.
private void tuneWriteFilter(ProtonConfig.Writefilter.Builder builder) {
    double reservedMemoryGb = 1;
    double availableMemoryGb = nodeFlavor.getMinMainMemoryAvailableGb();
    double defaultMemoryLimit = new ProtonConfig.Writefilter(new ProtonConfig.Writefilter.Builder()).memorylimit();
    builder.memorylimit(((availableMemoryGb - reservedMemoryGb) * defaultMemoryLimit) / availableMemoryGb);
}
// Derives proton config tuning (hw info, disk write speed, document-store
// file size, flush/TLS limits, summary cache and read I/O, write filter)
// from the node flavor's hardware characteristics. The exact threshold
// constants are load-bearing; code kept byte-identical, comments only.
class NodeFlavorTuning implements ProtonConfig.Producer { static long MB = 1024 * 1024; static long GB = MB * 1024; private final Flavor nodeFlavor; public NodeFlavorTuning(Flavor nodeFlavor) { this.nodeFlavor = nodeFlavor; } @Override public void getConfig(ProtonConfig.Builder builder) { setHwInfo(builder); tuneDiskWriteSpeed(builder); tuneDocumentStoreMaxFileSize(builder.summary.log); tuneFlushStrategyMemoryLimits(builder.flush.memory); tuneFlushStrategyTlsSize(builder.flush.memory); tuneSummaryReadIo(builder.summary.read); tuneSummaryCache(builder.summary.cache); tuneSearchReadIo(builder.search.mmap); tuneWriteFilter(builder.writefilter); for (ProtonConfig.Documentdb.Builder dbb : builder.documentdb) { getConfig(dbb); } } private void getConfig(ProtonConfig.Documentdb.Builder builder) { ProtonConfig.Documentdb dbCfg = builder.build(); if (dbCfg.mode() != ProtonConfig.Documentdb.Mode.Enum.INDEX) { long numDocs = (long)nodeFlavor.getMinMainMemoryAvailableGb()*GB/64L; builder.allocation.initialnumdocs(numDocs); } } private void tuneSummaryCache(ProtonConfig.Summary.Cache.Builder builder) { long memoryLimitBytes = (long) ((nodeFlavor.getMinMainMemoryAvailableGb() * 0.05) * GB); builder.maxbytes(memoryLimitBytes); } private void setHwInfo(ProtonConfig.Builder builder) { builder.hwinfo.disk.size((long)nodeFlavor.getMinDiskAvailableGb() * GB); builder.hwinfo.disk.shared(nodeFlavor.getType().equals(Flavor.Type.DOCKER_CONTAINER)); builder.hwinfo.memory.size((long)nodeFlavor.getMinMainMemoryAvailableGb() * GB); builder.hwinfo.cpu.cores((int)nodeFlavor.getMinCpuCores()); } private void tuneDiskWriteSpeed(ProtonConfig.Builder builder) { if (!nodeFlavor.hasFastDisk()) { builder.hwinfo.disk.writespeed(40); } } private void tuneDocumentStoreMaxFileSize(ProtonConfig.Summary.Log.Builder builder) { double memoryGb = nodeFlavor.getMinMainMemoryAvailableGb(); long fileSizeBytes = 4 * GB; if (memoryGb <= 12.0) { fileSizeBytes = 256 * MB; } else if (memoryGb < 24.0) { fileSizeBytes = 
512 * MB; } else if (memoryGb <= 64.0) { fileSizeBytes = 1 * GB; } builder.maxfilesize(fileSizeBytes); } private void tuneFlushStrategyMemoryLimits(ProtonConfig.Flush.Memory.Builder builder) { long memoryLimitBytes = (long) ((nodeFlavor.getMinMainMemoryAvailableGb() / 8) * GB); builder.maxmemory(memoryLimitBytes); builder.each.maxmemory(memoryLimitBytes); } private void tuneFlushStrategyTlsSize(ProtonConfig.Flush.Memory.Builder builder) { long tlsSizeBytes = (long) ((nodeFlavor.getMinDiskAvailableGb() * 0.07) * GB); tlsSizeBytes = min(tlsSizeBytes, 100 * GB); builder.maxtlssize(tlsSizeBytes); } private void tuneSummaryReadIo(ProtonConfig.Summary.Read.Builder builder) { if (nodeFlavor.hasFastDisk()) { builder.io(ProtonConfig.Summary.Read.Io.DIRECTIO); } } private void tuneSearchReadIo(ProtonConfig.Search.Mmap.Builder builder) { if (nodeFlavor.hasFastDisk()) { builder.advise(ProtonConfig.Search.Mmap.Advise.RANDOM); } } }
class NodeFlavorTuning implements ProtonConfig.Producer { static long MB = 1024 * 1024; static long GB = MB * 1024; private final Flavor nodeFlavor; public NodeFlavorTuning(Flavor nodeFlavor) { this.nodeFlavor = nodeFlavor; } @Override public void getConfig(ProtonConfig.Builder builder) { setHwInfo(builder); tuneDiskWriteSpeed(builder); tuneDocumentStoreMaxFileSize(builder.summary.log); tuneFlushStrategyMemoryLimits(builder.flush.memory); tuneFlushStrategyTlsSize(builder.flush.memory); tuneSummaryReadIo(builder.summary.read); tuneSummaryCache(builder.summary.cache); tuneSearchReadIo(builder.search.mmap); tuneWriteFilter(builder.writefilter); for (ProtonConfig.Documentdb.Builder dbb : builder.documentdb) { getConfig(dbb); } } private void getConfig(ProtonConfig.Documentdb.Builder builder) { ProtonConfig.Documentdb dbCfg = builder.build(); if (dbCfg.mode() != ProtonConfig.Documentdb.Mode.Enum.INDEX) { long numDocs = (long)nodeFlavor.getMinMainMemoryAvailableGb()*GB/64L; builder.allocation.initialnumdocs(numDocs); } } private void tuneSummaryCache(ProtonConfig.Summary.Cache.Builder builder) { long memoryLimitBytes = (long) ((nodeFlavor.getMinMainMemoryAvailableGb() * 0.05) * GB); builder.maxbytes(memoryLimitBytes); } private void setHwInfo(ProtonConfig.Builder builder) { builder.hwinfo.disk.size((long)nodeFlavor.getMinDiskAvailableGb() * GB); builder.hwinfo.disk.shared(nodeFlavor.getType().equals(Flavor.Type.DOCKER_CONTAINER)); builder.hwinfo.memory.size((long)nodeFlavor.getMinMainMemoryAvailableGb() * GB); builder.hwinfo.cpu.cores((int)nodeFlavor.getMinCpuCores()); } private void tuneDiskWriteSpeed(ProtonConfig.Builder builder) { if (!nodeFlavor.hasFastDisk()) { builder.hwinfo.disk.writespeed(40); } } private void tuneDocumentStoreMaxFileSize(ProtonConfig.Summary.Log.Builder builder) { double memoryGb = nodeFlavor.getMinMainMemoryAvailableGb(); long fileSizeBytes = 4 * GB; if (memoryGb <= 12.0) { fileSizeBytes = 256 * MB; } else if (memoryGb < 24.0) { fileSizeBytes = 
512 * MB; } else if (memoryGb <= 64.0) { fileSizeBytes = 1 * GB; } builder.maxfilesize(fileSizeBytes); } private void tuneFlushStrategyMemoryLimits(ProtonConfig.Flush.Memory.Builder builder) { long memoryLimitBytes = (long) ((nodeFlavor.getMinMainMemoryAvailableGb() / 8) * GB); builder.maxmemory(memoryLimitBytes); builder.each.maxmemory(memoryLimitBytes); } private void tuneFlushStrategyTlsSize(ProtonConfig.Flush.Memory.Builder builder) { long tlsSizeBytes = (long) ((nodeFlavor.getMinDiskAvailableGb() * 0.07) * GB); tlsSizeBytes = min(tlsSizeBytes, 100 * GB); builder.maxtlssize(tlsSizeBytes); } private void tuneSummaryReadIo(ProtonConfig.Summary.Read.Builder builder) { if (nodeFlavor.hasFastDisk()) { builder.io(ProtonConfig.Summary.Read.Io.DIRECTIO); } } private void tuneSearchReadIo(ProtonConfig.Search.Mmap.Builder builder) { if (nodeFlavor.hasFastDisk()) { builder.advise(ProtonConfig.Search.Mmap.Advise.RANDOM); } } }
This seems odd. `RotationName` and `EndpointId` are different things, right?
private static Set<RoutingId> routingIdsFrom(List<LoadBalancer> loadBalancers) { Set<RoutingId> routingIds = new LinkedHashSet<>(); for (var loadBalancer : loadBalancers) { for (var rotation : loadBalancer.rotations()) { routingIds.add(new RoutingId(loadBalancer.application(), EndpointId.of(rotation.value()))); } } return Collections.unmodifiableSet(routingIds); }
routingIds.add(new RoutingId(loadBalancer.application(), EndpointId.of(rotation.value())));
private static Set<RoutingId> routingIdsFrom(List<LoadBalancer> loadBalancers) { Set<RoutingId> routingIds = new LinkedHashSet<>(); for (var loadBalancer : loadBalancers) { for (var rotation : loadBalancer.rotations()) { routingIds.add(new RoutingId(loadBalancer.application(), EndpointId.of(rotation.value()))); } } return Collections.unmodifiableSet(routingIds); }
class RoutingPolicies { private final Controller controller; private final CuratorDb db; public RoutingPolicies(Controller controller) { this.controller = Objects.requireNonNull(controller, "controller must be non-null"); this.db = controller.curator(); try (var lock = db.lockRoutingPolicies()) { for (var policy : db.readRoutingPolicies().entrySet()) { db.writeRoutingPolicies(policy.getKey(), policy.getValue()); } } } /** Read all known routing policies for given application */ public Set<RoutingPolicy> get(ApplicationId application) { return db.readRoutingPolicies(application); } /** Read all known routing policies for given deployment */ public Set<RoutingPolicy> get(DeploymentId deployment) { return get(deployment.applicationId(), deployment.zoneId()); } /** Read all known routing policies for given deployment */ public Set<RoutingPolicy> get(ApplicationId application, ZoneId zone) { return db.readRoutingPolicies(application).stream() .filter(policy -> policy.zone().equals(zone)) .collect(Collectors.toUnmodifiableSet()); } /** * Refresh routing policies for application in given zone. This is idempotent and changes will only be performed if * load balancers for given application have changed. 
*/ public void refresh(ApplicationId application, ZoneId zone) { if (!controller.zoneRegistry().zones().directlyRouted().ids().contains(zone)) return; var lbs = new LoadBalancers(application, zone, controller.applications().configServer() .getLoadBalancers(application, zone)); try (var lock = db.lockRoutingPolicies()) { removeObsoleteEndpointsFromDns(lbs, lock); storePoliciesOf(lbs, lock); removeObsoletePolicies(lbs, lock); registerEndpointsInDns(lbs, lock); } } /** Create global endpoints for given route, if any */ private void registerEndpointsInDns(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) { Map<RoutingId, List<RoutingPolicy>> routingTable = routingTableFrom(get(loadBalancers.application)); for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) { Endpoint endpoint = RoutingPolicy.endpointOf(routeEntry.getKey().application(), routeEntry.getKey().endpointId(), controller.system()); Set<AliasTarget> targets = routeEntry.getValue() .stream() .filter(policy -> policy.dnsZone().isPresent()) .map(policy -> new AliasTarget(policy.canonicalName(), policy.dnsZone().get(), policy.zone())) .collect(Collectors.toSet()); controller.nameServiceForwarder().createAlias(RecordName.from(endpoint.dnsName()), targets, Priority.normal); } } /** Store routing policies for given route */ private void storePoliciesOf(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) { Set<RoutingPolicy> policies = new LinkedHashSet<>(get(loadBalancers.application)); for (LoadBalancer loadBalancer : loadBalancers.list) { RoutingPolicy policy = createPolicy(loadBalancers.application, loadBalancers.zone, loadBalancer); if (!policies.add(policy)) { policies.remove(policy); policies.add(policy); } } db.writeRoutingPolicies(loadBalancers.application, policies); } /** Create a policy for given load balancer and register a CNAME for it */ private RoutingPolicy createPolicy(ApplicationId application, ZoneId zone, LoadBalancer loadBalancer) 
{ Set<EndpointId> endpoints = loadBalancer.rotations().stream() .map(RotationName::value) .map(EndpointId::of) .collect(Collectors.toSet()); RoutingPolicy routingPolicy = new RoutingPolicy(application, loadBalancer.cluster(), zone, loadBalancer.hostname(), loadBalancer.dnsZone(), endpoints); RecordName name = RecordName.from(routingPolicy.endpointIn(controller.system()).dnsName()); RecordData data = RecordData.fqdn(loadBalancer.hostname().value()); controller.nameServiceForwarder().createCname(name, data, Priority.normal); return routingPolicy; } /** Remove obsolete policies for given route and their CNAME records */ private void removeObsoletePolicies(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) { var allPolicies = new LinkedHashSet<>(get(loadBalancers.application)); var removalCandidates = new HashSet<>(allPolicies); var activeLoadBalancers = loadBalancers.list.stream() .map(LoadBalancer::hostname) .collect(Collectors.toSet()); removalCandidates.removeIf(policy -> activeLoadBalancers.contains(policy.canonicalName()) || !policy.zone().equals(loadBalancers.zone)); for (var policy : removalCandidates) { var dnsName = policy.endpointIn(controller.system()).dnsName(); controller.nameServiceForwarder().removeRecords(Record.Type.CNAME, RecordName.from(dnsName), Priority.normal); allPolicies.remove(policy); } db.writeRoutingPolicies(loadBalancers.application, allPolicies); } /** Remove unreferenced global endpoints for given route from DNS */ private void removeObsoleteEndpointsFromDns(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) { var zonePolicies = get(loadBalancers.application, loadBalancers.zone); var removalCandidates = routingTableFrom(zonePolicies).keySet(); var activeRoutingIds = routingIdsFrom(loadBalancers.list); removalCandidates.removeAll(activeRoutingIds); for (var id : removalCandidates) { Endpoint endpoint = RoutingPolicy.endpointOf(id.application(), id.endpointId(), controller.system()); 
controller.nameServiceForwarder().removeRecords(Record.Type.ALIAS, RecordName.from(endpoint.dnsName()), Priority.normal); } } /** Compute routing IDs from given load balancers */ /** Compute a routing table from given policies */ private static Map<RoutingId, List<RoutingPolicy>> routingTableFrom(Set<RoutingPolicy> routingPolicies) { var routingTable = new LinkedHashMap<RoutingId, List<RoutingPolicy>>(); for (var policy : routingPolicies) { for (var rotation : policy.endpoints()) { var id = new RoutingId(policy.owner(), rotation); routingTable.putIfAbsent(id, new ArrayList<>()); routingTable.get(id).add(policy); } } return routingTable; } /** Load balancers for a particular deployment */ private static class LoadBalancers { private final ApplicationId application; private final ZoneId zone; private final List<LoadBalancer> list; private LoadBalancers(ApplicationId application, ZoneId zone, List<LoadBalancer> list) { this.application = application; this.zone = zone; this.list = list; } } }
class RoutingPolicies { private final Controller controller; private final CuratorDb db; public RoutingPolicies(Controller controller) { this.controller = Objects.requireNonNull(controller, "controller must be non-null"); this.db = controller.curator(); try (var lock = db.lockRoutingPolicies()) { for (var policy : db.readRoutingPolicies().entrySet()) { db.writeRoutingPolicies(policy.getKey(), policy.getValue()); } } } /** Read all known routing policies for given application */ public Set<RoutingPolicy> get(ApplicationId application) { return db.readRoutingPolicies(application); } /** Read all known routing policies for given deployment */ public Set<RoutingPolicy> get(DeploymentId deployment) { return get(deployment.applicationId(), deployment.zoneId()); } /** Read all known routing policies for given deployment */ public Set<RoutingPolicy> get(ApplicationId application, ZoneId zone) { return db.readRoutingPolicies(application).stream() .filter(policy -> policy.zone().equals(zone)) .collect(Collectors.toUnmodifiableSet()); } /** * Refresh routing policies for application in given zone. This is idempotent and changes will only be performed if * load balancers for given application have changed. 
*/ public void refresh(ApplicationId application, ZoneId zone) { if (!controller.zoneRegistry().zones().directlyRouted().ids().contains(zone)) return; var lbs = new LoadBalancers(application, zone, controller.applications().configServer() .getLoadBalancers(application, zone)); try (var lock = db.lockRoutingPolicies()) { removeObsoleteEndpointsFromDns(lbs, lock); storePoliciesOf(lbs, lock); removeObsoletePolicies(lbs, lock); registerEndpointsInDns(lbs, lock); } } /** Create global endpoints for given route, if any */ private void registerEndpointsInDns(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) { Map<RoutingId, List<RoutingPolicy>> routingTable = routingTableFrom(get(loadBalancers.application)); for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) { Endpoint endpoint = RoutingPolicy.endpointOf(routeEntry.getKey().application(), routeEntry.getKey().endpointId(), controller.system()); Set<AliasTarget> targets = routeEntry.getValue() .stream() .filter(policy -> policy.dnsZone().isPresent()) .map(policy -> new AliasTarget(policy.canonicalName(), policy.dnsZone().get(), policy.zone())) .collect(Collectors.toSet()); controller.nameServiceForwarder().createAlias(RecordName.from(endpoint.dnsName()), targets, Priority.normal); } } /** Store routing policies for given route */ private void storePoliciesOf(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) { Set<RoutingPolicy> policies = new LinkedHashSet<>(get(loadBalancers.application)); for (LoadBalancer loadBalancer : loadBalancers.list) { RoutingPolicy policy = createPolicy(loadBalancers.application, loadBalancers.zone, loadBalancer); if (!policies.add(policy)) { policies.remove(policy); policies.add(policy); } } db.writeRoutingPolicies(loadBalancers.application, policies); } /** Create a policy for given load balancer and register a CNAME for it */ private RoutingPolicy createPolicy(ApplicationId application, ZoneId zone, LoadBalancer loadBalancer) 
{ Set<EndpointId> endpoints = loadBalancer.rotations().stream() .map(RotationName::value) .map(EndpointId::of) .collect(Collectors.toSet()); RoutingPolicy routingPolicy = new RoutingPolicy(application, loadBalancer.cluster(), zone, loadBalancer.hostname(), loadBalancer.dnsZone(), endpoints); RecordName name = RecordName.from(routingPolicy.endpointIn(controller.system()).dnsName()); RecordData data = RecordData.fqdn(loadBalancer.hostname().value()); controller.nameServiceForwarder().createCname(name, data, Priority.normal); return routingPolicy; } /** Remove obsolete policies for given route and their CNAME records */ private void removeObsoletePolicies(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) { var allPolicies = new LinkedHashSet<>(get(loadBalancers.application)); var removalCandidates = new HashSet<>(allPolicies); var activeLoadBalancers = loadBalancers.list.stream() .map(LoadBalancer::hostname) .collect(Collectors.toSet()); removalCandidates.removeIf(policy -> activeLoadBalancers.contains(policy.canonicalName()) || !policy.zone().equals(loadBalancers.zone)); for (var policy : removalCandidates) { var dnsName = policy.endpointIn(controller.system()).dnsName(); controller.nameServiceForwarder().removeRecords(Record.Type.CNAME, RecordName.from(dnsName), Priority.normal); allPolicies.remove(policy); } db.writeRoutingPolicies(loadBalancers.application, allPolicies); } /** Remove unreferenced global endpoints for given route from DNS */ private void removeObsoleteEndpointsFromDns(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) { var zonePolicies = get(loadBalancers.application, loadBalancers.zone); var removalCandidates = routingTableFrom(zonePolicies).keySet(); var activeRoutingIds = routingIdsFrom(loadBalancers.list); removalCandidates.removeAll(activeRoutingIds); for (var id : removalCandidates) { Endpoint endpoint = RoutingPolicy.endpointOf(id.application(), id.endpointId(), controller.system()); 
controller.nameServiceForwarder().removeRecords(Record.Type.ALIAS, RecordName.from(endpoint.dnsName()), Priority.normal); } } /** Compute routing IDs from given load balancers */ /** Compute a routing table from given policies */ private static Map<RoutingId, List<RoutingPolicy>> routingTableFrom(Set<RoutingPolicy> routingPolicies) { var routingTable = new LinkedHashMap<RoutingId, List<RoutingPolicy>>(); for (var policy : routingPolicies) { for (var rotation : policy.endpoints()) { var id = new RoutingId(policy.owner(), rotation); routingTable.putIfAbsent(id, new ArrayList<>()); routingTable.get(id).add(policy); } } return routingTable; } /** Load balancers for a particular deployment */ private static class LoadBalancers { private final ApplicationId application; private final ZoneId zone; private final List<LoadBalancer> list; private LoadBalancers(ApplicationId application, ZoneId zone, List<LoadBalancer> list) { this.application = application; this.zone = zone; this.list = list; } } }
(nit) Better as: ```java var endpointIds = new LinkedHashSet<EndpointId>(); ```
public Set<RoutingPolicy> fromSlime(ApplicationId owner, Slime slime) { Set<RoutingPolicy> policies = new LinkedHashSet<>(); Cursor root = slime.get(); Cursor field = root.field(routingPoliciesField); field.traverse((ArrayTraverser) (i, inspect) -> { Set<EndpointId> endpointIds = new LinkedHashSet<>(); inspect.field(rotationsField).traverse((ArrayTraverser) (j, endpointId) -> endpointIds.add(EndpointId.of(endpointId.asString()))); policies.add(new RoutingPolicy(owner, ClusterSpec.Id.from(inspect.field(clusterField).asString()), ZoneId.from(inspect.field(zoneField).asString()), HostName.from(inspect.field(canonicalNameField).asString()), Serializers.optionalField(inspect.field(dnsZoneField), Function.identity()), endpointIds)); }); return Collections.unmodifiableSet(policies); }
Set<EndpointId> endpointIds = new LinkedHashSet<>();
public Set<RoutingPolicy> fromSlime(ApplicationId owner, Slime slime) { var policies = new LinkedHashSet<RoutingPolicy>(); var root = slime.get(); var field = root.field(routingPoliciesField); field.traverse((ArrayTraverser) (i, inspect) -> { var endpointIds = new LinkedHashSet<EndpointId>(); inspect.field(rotationsField).traverse((ArrayTraverser) (j, endpointId) -> endpointIds.add(EndpointId.of(endpointId.asString()))); policies.add(new RoutingPolicy(owner, ClusterSpec.Id.from(inspect.field(clusterField).asString()), ZoneId.from(inspect.field(zoneField).asString()), HostName.from(inspect.field(canonicalNameField).asString()), Serializers.optionalField(inspect.field(dnsZoneField), Function.identity()), endpointIds)); }); return Collections.unmodifiableSet(policies); }
class RoutingPolicySerializer { private static final String routingPoliciesField = "routingPolicies"; private static final String clusterField = "cluster"; private static final String canonicalNameField = "canonicalName"; private static final String zoneField = "zone"; private static final String dnsZoneField = "dnsZone"; private static final String rotationsField = "rotations"; public Slime toSlime(Set<RoutingPolicy> routingPolicies) { Slime slime = new Slime(); Cursor root = slime.setObject(); Cursor policyArray = root.setArray(routingPoliciesField); routingPolicies.forEach(policy -> { Cursor policyObject = policyArray.addObject(); policyObject.setString(clusterField, policy.cluster().value()); policyObject.setString(zoneField, policy.zone().value()); policyObject.setString(canonicalNameField, policy.canonicalName().value()); policy.dnsZone().ifPresent(dnsZone -> policyObject.setString(dnsZoneField, dnsZone)); Cursor rotationArray = policyObject.setArray(rotationsField); policy.endpoints().forEach(endpointId -> { rotationArray.addString(endpointId.id()); }); }); return slime; } }
class RoutingPolicySerializer { private static final String routingPoliciesField = "routingPolicies"; private static final String clusterField = "cluster"; private static final String canonicalNameField = "canonicalName"; private static final String zoneField = "zone"; private static final String dnsZoneField = "dnsZone"; private static final String rotationsField = "rotations"; public Slime toSlime(Set<RoutingPolicy> routingPolicies) { var slime = new Slime(); var root = slime.setObject(); var policyArray = root.setArray(routingPoliciesField); routingPolicies.forEach(policy -> { var policyObject = policyArray.addObject(); policyObject.setString(clusterField, policy.cluster().value()); policyObject.setString(zoneField, policy.zone().value()); policyObject.setString(canonicalNameField, policy.canonicalName().value()); policy.dnsZone().ifPresent(dnsZone -> policyObject.setString(dnsZoneField, dnsZone)); var rotationArray = policyObject.setArray(rotationsField); policy.endpoints().forEach(endpointId -> { rotationArray.addString(endpointId.id()); }); }); return slime; } }
Nit: Rename the local variable `rotation` to `endpointId` as well, so the name matches the `EndpointId` type it holds.
public void test_global_endpoints() { EndpointId rotation = EndpointId.default_(); Map<String, Endpoint> tests = Map.of( "http: Endpoint.of(app1).named(rotation).on(Port.plain(4080)).legacy().in(SystemName.main), "https: Endpoint.of(app1).named(rotation).on(Port.tls(4443)).legacy().in(SystemName.main), "https: Endpoint.of(app1).named(rotation).on(Port.tls(4443)).in(SystemName.main), "https: Endpoint.of(app1).named(rotation).on(Port.tls(4443)).in(SystemName.cd), "https: Endpoint.of(app1).named(rotation).on(Port.tls()).directRouting().in(SystemName.main), "https: Endpoint.of(app1).named(EndpointId.of("r1")).on(Port.tls()).directRouting().in(SystemName.main), "https: Endpoint.of(app2).named(rotation).on(Port.tls()).directRouting().in(SystemName.main), "https: Endpoint.of(app2).named(EndpointId.of("r2")).on(Port.tls()).directRouting().in(SystemName.main), "https: Endpoint.of(app1).named(rotation).on(Port.tls()).directRouting().in(SystemName.Public) ); tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString())); }
EndpointId rotation = EndpointId.default_();
public void test_global_endpoints() { EndpointId endpointId = EndpointId.default_(); Map<String, Endpoint> tests = Map.of( "http: Endpoint.of(app1).named(endpointId).on(Port.plain(4080)).legacy().in(SystemName.main), "https: Endpoint.of(app1).named(endpointId).on(Port.tls(4443)).legacy().in(SystemName.main), "https: Endpoint.of(app1).named(endpointId).on(Port.tls(4443)).in(SystemName.main), "https: Endpoint.of(app1).named(endpointId).on(Port.tls(4443)).in(SystemName.cd), "https: Endpoint.of(app1).named(endpointId).on(Port.tls()).directRouting().in(SystemName.main), "https: Endpoint.of(app1).named(EndpointId.of("r1")).on(Port.tls()).directRouting().in(SystemName.main), "https: Endpoint.of(app2).named(endpointId).on(Port.tls()).directRouting().in(SystemName.main), "https: Endpoint.of(app2).named(EndpointId.of("r2")).on(Port.tls()).directRouting().in(SystemName.main), "https: Endpoint.of(app1).named(endpointId).on(Port.tls()).directRouting().in(SystemName.Public) ); tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString())); }
class EndpointTest { private static final ApplicationId app1 = ApplicationId.from("t1", "a1", "default"); private static final ApplicationId app2 = ApplicationId.from("t2", "a2", "i2"); @Test @Test public void test_global_endpoints_with_endpoint_id() { final var endpointId = EndpointId.default_(); Map<String, Endpoint> tests = Map.of( "http: Endpoint.of(app1).named(endpointId).on(Port.plain(4080)).legacy().in(SystemName.main), "https: Endpoint.of(app1).named(endpointId).on(Port.tls(4443)).legacy().in(SystemName.main), "https: Endpoint.of(app1).named(endpointId).on(Port.tls(4443)).in(SystemName.main), "https: Endpoint.of(app1).named(endpointId).on(Port.tls(4443)).in(SystemName.cd), "https: Endpoint.of(app1).named(endpointId).on(Port.tls()).directRouting().in(SystemName.main), "https: Endpoint.of(app1).named(EndpointId.of("r1")).on(Port.tls()).directRouting().in(SystemName.main), "https: Endpoint.of(app2).named(endpointId).on(Port.tls()).directRouting().in(SystemName.main), "https: Endpoint.of(app2).named(EndpointId.of("r2")).on(Port.tls()).directRouting().in(SystemName.main), "https: Endpoint.of(app1).named(endpointId).on(Port.tls()).directRouting().in(SystemName.Public) ); tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString())); } @Test public void test_zone_endpoints() { ClusterSpec.Id cluster = ClusterSpec.Id.from("default"); ZoneId prodZone = ZoneId.from("prod", "us-north-1"); ZoneId testZone = ZoneId.from("test", "us-north-2"); Map<String, Endpoint> tests = Map.of( "http: Endpoint.of(app1).target(cluster, prodZone).on(Port.plain(4080)).legacy().in(SystemName.main), "https: Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).legacy().in(SystemName.main), "https: Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main), "https: Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.cd), "https: Endpoint.of(app1).target(cluster, 
testZone).on(Port.tls(4443)).in(SystemName.main), "https: Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).in(SystemName.main), "https: Endpoint.of(app2).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main), "https: Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public), "https: Endpoint.of(app2).target(ClusterSpec.Id.from("c2"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public) ); tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString())); } }
class EndpointTest { private static final ApplicationId app1 = ApplicationId.from("t1", "a1", "default"); private static final ApplicationId app2 = ApplicationId.from("t2", "a2", "i2"); @Test @Test public void test_global_endpoints_with_endpoint_id() { var endpointId = EndpointId.default_(); Map<String, Endpoint> tests = Map.of( "http: Endpoint.of(app1).named(endpointId).on(Port.plain(4080)).legacy().in(SystemName.main), "https: Endpoint.of(app1).named(endpointId).on(Port.tls(4443)).legacy().in(SystemName.main), "https: Endpoint.of(app1).named(endpointId).on(Port.tls(4443)).in(SystemName.main), "https: Endpoint.of(app1).named(endpointId).on(Port.tls(4443)).in(SystemName.cd), "https: Endpoint.of(app1).named(endpointId).on(Port.tls()).directRouting().in(SystemName.main), "https: Endpoint.of(app1).named(EndpointId.of("r1")).on(Port.tls()).directRouting().in(SystemName.main), "https: Endpoint.of(app2).named(endpointId).on(Port.tls()).directRouting().in(SystemName.main), "https: Endpoint.of(app2).named(EndpointId.of("r2")).on(Port.tls()).directRouting().in(SystemName.main), "https: Endpoint.of(app1).named(endpointId).on(Port.tls()).directRouting().in(SystemName.Public) ); tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString())); } @Test public void test_zone_endpoints() { var cluster = ClusterSpec.Id.from("default"); var prodZone = ZoneId.from("prod", "us-north-1"); var testZone = ZoneId.from("test", "us-north-2"); Map<String, Endpoint> tests = Map.of( "http: Endpoint.of(app1).target(cluster, prodZone).on(Port.plain(4080)).legacy().in(SystemName.main), "https: Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).legacy().in(SystemName.main), "https: Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main), "https: Endpoint.of(app1).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.cd), "https: Endpoint.of(app1).target(cluster, testZone).on(Port.tls(4443)).in(SystemName.main), "https: 
Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).in(SystemName.main), "https: Endpoint.of(app2).target(cluster, prodZone).on(Port.tls(4443)).in(SystemName.main), "https: Endpoint.of(app1).target(ClusterSpec.Id.from("c1"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public), "https: Endpoint.of(app2).target(ClusterSpec.Id.from("c2"), prodZone).on(Port.tls()).directRouting().in(SystemName.Public) ); tests.forEach((expected, endpoint) -> assertEquals(expected, endpoint.url().toString())); } }
It's temporary to reduce dependencies on `RotationName`. They represent the same thing.
private static Set<RoutingId> routingIdsFrom(List<LoadBalancer> loadBalancers) { Set<RoutingId> routingIds = new LinkedHashSet<>(); for (var loadBalancer : loadBalancers) { for (var rotation : loadBalancer.rotations()) { routingIds.add(new RoutingId(loadBalancer.application(), EndpointId.of(rotation.value()))); } } return Collections.unmodifiableSet(routingIds); }
routingIds.add(new RoutingId(loadBalancer.application(), EndpointId.of(rotation.value())));
private static Set<RoutingId> routingIdsFrom(List<LoadBalancer> loadBalancers) { Set<RoutingId> routingIds = new LinkedHashSet<>(); for (var loadBalancer : loadBalancers) { for (var rotation : loadBalancer.rotations()) { routingIds.add(new RoutingId(loadBalancer.application(), EndpointId.of(rotation.value()))); } } return Collections.unmodifiableSet(routingIds); }
class RoutingPolicies { private final Controller controller; private final CuratorDb db; public RoutingPolicies(Controller controller) { this.controller = Objects.requireNonNull(controller, "controller must be non-null"); this.db = controller.curator(); try (var lock = db.lockRoutingPolicies()) { for (var policy : db.readRoutingPolicies().entrySet()) { db.writeRoutingPolicies(policy.getKey(), policy.getValue()); } } } /** Read all known routing policies for given application */ public Set<RoutingPolicy> get(ApplicationId application) { return db.readRoutingPolicies(application); } /** Read all known routing policies for given deployment */ public Set<RoutingPolicy> get(DeploymentId deployment) { return get(deployment.applicationId(), deployment.zoneId()); } /** Read all known routing policies for given deployment */ public Set<RoutingPolicy> get(ApplicationId application, ZoneId zone) { return db.readRoutingPolicies(application).stream() .filter(policy -> policy.zone().equals(zone)) .collect(Collectors.toUnmodifiableSet()); } /** * Refresh routing policies for application in given zone. This is idempotent and changes will only be performed if * load balancers for given application have changed. 
*/ public void refresh(ApplicationId application, ZoneId zone) { if (!controller.zoneRegistry().zones().directlyRouted().ids().contains(zone)) return; var lbs = new LoadBalancers(application, zone, controller.applications().configServer() .getLoadBalancers(application, zone)); try (var lock = db.lockRoutingPolicies()) { removeObsoleteEndpointsFromDns(lbs, lock); storePoliciesOf(lbs, lock); removeObsoletePolicies(lbs, lock); registerEndpointsInDns(lbs, lock); } } /** Create global endpoints for given route, if any */ private void registerEndpointsInDns(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) { Map<RoutingId, List<RoutingPolicy>> routingTable = routingTableFrom(get(loadBalancers.application)); for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) { Endpoint endpoint = RoutingPolicy.endpointOf(routeEntry.getKey().application(), routeEntry.getKey().endpointId(), controller.system()); Set<AliasTarget> targets = routeEntry.getValue() .stream() .filter(policy -> policy.dnsZone().isPresent()) .map(policy -> new AliasTarget(policy.canonicalName(), policy.dnsZone().get(), policy.zone())) .collect(Collectors.toSet()); controller.nameServiceForwarder().createAlias(RecordName.from(endpoint.dnsName()), targets, Priority.normal); } } /** Store routing policies for given route */ private void storePoliciesOf(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) { Set<RoutingPolicy> policies = new LinkedHashSet<>(get(loadBalancers.application)); for (LoadBalancer loadBalancer : loadBalancers.list) { RoutingPolicy policy = createPolicy(loadBalancers.application, loadBalancers.zone, loadBalancer); if (!policies.add(policy)) { policies.remove(policy); policies.add(policy); } } db.writeRoutingPolicies(loadBalancers.application, policies); } /** Create a policy for given load balancer and register a CNAME for it */ private RoutingPolicy createPolicy(ApplicationId application, ZoneId zone, LoadBalancer loadBalancer) 
{ Set<EndpointId> endpoints = loadBalancer.rotations().stream() .map(RotationName::value) .map(EndpointId::of) .collect(Collectors.toSet()); RoutingPolicy routingPolicy = new RoutingPolicy(application, loadBalancer.cluster(), zone, loadBalancer.hostname(), loadBalancer.dnsZone(), endpoints); RecordName name = RecordName.from(routingPolicy.endpointIn(controller.system()).dnsName()); RecordData data = RecordData.fqdn(loadBalancer.hostname().value()); controller.nameServiceForwarder().createCname(name, data, Priority.normal); return routingPolicy; } /** Remove obsolete policies for given route and their CNAME records */ private void removeObsoletePolicies(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) { var allPolicies = new LinkedHashSet<>(get(loadBalancers.application)); var removalCandidates = new HashSet<>(allPolicies); var activeLoadBalancers = loadBalancers.list.stream() .map(LoadBalancer::hostname) .collect(Collectors.toSet()); removalCandidates.removeIf(policy -> activeLoadBalancers.contains(policy.canonicalName()) || !policy.zone().equals(loadBalancers.zone)); for (var policy : removalCandidates) { var dnsName = policy.endpointIn(controller.system()).dnsName(); controller.nameServiceForwarder().removeRecords(Record.Type.CNAME, RecordName.from(dnsName), Priority.normal); allPolicies.remove(policy); } db.writeRoutingPolicies(loadBalancers.application, allPolicies); } /** Remove unreferenced global endpoints for given route from DNS */ private void removeObsoleteEndpointsFromDns(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) { var zonePolicies = get(loadBalancers.application, loadBalancers.zone); var removalCandidates = routingTableFrom(zonePolicies).keySet(); var activeRoutingIds = routingIdsFrom(loadBalancers.list); removalCandidates.removeAll(activeRoutingIds); for (var id : removalCandidates) { Endpoint endpoint = RoutingPolicy.endpointOf(id.application(), id.endpointId(), controller.system()); 
controller.nameServiceForwarder().removeRecords(Record.Type.ALIAS, RecordName.from(endpoint.dnsName()), Priority.normal); } } /** Compute routing IDs from given load balancers */ /** Compute a routing table from given policies */ private static Map<RoutingId, List<RoutingPolicy>> routingTableFrom(Set<RoutingPolicy> routingPolicies) { var routingTable = new LinkedHashMap<RoutingId, List<RoutingPolicy>>(); for (var policy : routingPolicies) { for (var rotation : policy.endpoints()) { var id = new RoutingId(policy.owner(), rotation); routingTable.putIfAbsent(id, new ArrayList<>()); routingTable.get(id).add(policy); } } return routingTable; } /** Load balancers for a particular deployment */ private static class LoadBalancers { private final ApplicationId application; private final ZoneId zone; private final List<LoadBalancer> list; private LoadBalancers(ApplicationId application, ZoneId zone, List<LoadBalancer> list) { this.application = application; this.zone = zone; this.list = list; } } }
class RoutingPolicies { private final Controller controller; private final CuratorDb db; public RoutingPolicies(Controller controller) { this.controller = Objects.requireNonNull(controller, "controller must be non-null"); this.db = controller.curator(); try (var lock = db.lockRoutingPolicies()) { for (var policy : db.readRoutingPolicies().entrySet()) { db.writeRoutingPolicies(policy.getKey(), policy.getValue()); } } } /** Read all known routing policies for given application */ public Set<RoutingPolicy> get(ApplicationId application) { return db.readRoutingPolicies(application); } /** Read all known routing policies for given deployment */ public Set<RoutingPolicy> get(DeploymentId deployment) { return get(deployment.applicationId(), deployment.zoneId()); } /** Read all known routing policies for given deployment */ public Set<RoutingPolicy> get(ApplicationId application, ZoneId zone) { return db.readRoutingPolicies(application).stream() .filter(policy -> policy.zone().equals(zone)) .collect(Collectors.toUnmodifiableSet()); } /** * Refresh routing policies for application in given zone. This is idempotent and changes will only be performed if * load balancers for given application have changed. 
*/ public void refresh(ApplicationId application, ZoneId zone) { if (!controller.zoneRegistry().zones().directlyRouted().ids().contains(zone)) return; var lbs = new LoadBalancers(application, zone, controller.applications().configServer() .getLoadBalancers(application, zone)); try (var lock = db.lockRoutingPolicies()) { removeObsoleteEndpointsFromDns(lbs, lock); storePoliciesOf(lbs, lock); removeObsoletePolicies(lbs, lock); registerEndpointsInDns(lbs, lock); } } /** Create global endpoints for given route, if any */ private void registerEndpointsInDns(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) { Map<RoutingId, List<RoutingPolicy>> routingTable = routingTableFrom(get(loadBalancers.application)); for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) { Endpoint endpoint = RoutingPolicy.endpointOf(routeEntry.getKey().application(), routeEntry.getKey().endpointId(), controller.system()); Set<AliasTarget> targets = routeEntry.getValue() .stream() .filter(policy -> policy.dnsZone().isPresent()) .map(policy -> new AliasTarget(policy.canonicalName(), policy.dnsZone().get(), policy.zone())) .collect(Collectors.toSet()); controller.nameServiceForwarder().createAlias(RecordName.from(endpoint.dnsName()), targets, Priority.normal); } } /** Store routing policies for given route */ private void storePoliciesOf(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) { Set<RoutingPolicy> policies = new LinkedHashSet<>(get(loadBalancers.application)); for (LoadBalancer loadBalancer : loadBalancers.list) { RoutingPolicy policy = createPolicy(loadBalancers.application, loadBalancers.zone, loadBalancer); if (!policies.add(policy)) { policies.remove(policy); policies.add(policy); } } db.writeRoutingPolicies(loadBalancers.application, policies); } /** Create a policy for given load balancer and register a CNAME for it */ private RoutingPolicy createPolicy(ApplicationId application, ZoneId zone, LoadBalancer loadBalancer) 
{ Set<EndpointId> endpoints = loadBalancer.rotations().stream() .map(RotationName::value) .map(EndpointId::of) .collect(Collectors.toSet()); RoutingPolicy routingPolicy = new RoutingPolicy(application, loadBalancer.cluster(), zone, loadBalancer.hostname(), loadBalancer.dnsZone(), endpoints); RecordName name = RecordName.from(routingPolicy.endpointIn(controller.system()).dnsName()); RecordData data = RecordData.fqdn(loadBalancer.hostname().value()); controller.nameServiceForwarder().createCname(name, data, Priority.normal); return routingPolicy; } /** Remove obsolete policies for given route and their CNAME records */ private void removeObsoletePolicies(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) { var allPolicies = new LinkedHashSet<>(get(loadBalancers.application)); var removalCandidates = new HashSet<>(allPolicies); var activeLoadBalancers = loadBalancers.list.stream() .map(LoadBalancer::hostname) .collect(Collectors.toSet()); removalCandidates.removeIf(policy -> activeLoadBalancers.contains(policy.canonicalName()) || !policy.zone().equals(loadBalancers.zone)); for (var policy : removalCandidates) { var dnsName = policy.endpointIn(controller.system()).dnsName(); controller.nameServiceForwarder().removeRecords(Record.Type.CNAME, RecordName.from(dnsName), Priority.normal); allPolicies.remove(policy); } db.writeRoutingPolicies(loadBalancers.application, allPolicies); } /** Remove unreferenced global endpoints for given route from DNS */ private void removeObsoleteEndpointsFromDns(LoadBalancers loadBalancers, @SuppressWarnings("unused") Lock lock) { var zonePolicies = get(loadBalancers.application, loadBalancers.zone); var removalCandidates = routingTableFrom(zonePolicies).keySet(); var activeRoutingIds = routingIdsFrom(loadBalancers.list); removalCandidates.removeAll(activeRoutingIds); for (var id : removalCandidates) { Endpoint endpoint = RoutingPolicy.endpointOf(id.application(), id.endpointId(), controller.system()); 
controller.nameServiceForwarder().removeRecords(Record.Type.ALIAS, RecordName.from(endpoint.dnsName()), Priority.normal); } } /** Compute routing IDs from given load balancers */ /** Compute a routing table from given policies */ private static Map<RoutingId, List<RoutingPolicy>> routingTableFrom(Set<RoutingPolicy> routingPolicies) { var routingTable = new LinkedHashMap<RoutingId, List<RoutingPolicy>>(); for (var policy : routingPolicies) { for (var rotation : policy.endpoints()) { var id = new RoutingId(policy.owner(), rotation); routingTable.putIfAbsent(id, new ArrayList<>()); routingTable.get(id).add(policy); } } return routingTable; } /** Load balancers for a particular deployment */ private static class LoadBalancers { private final ApplicationId application; private final ZoneId zone; private final List<LoadBalancer> list; private LoadBalancers(ApplicationId application, ZoneId zone, List<LoadBalancer> list) { this.application = application; this.zone = zone; this.list = list; } } }
Instead of if/else, just do a `assertTrue(failurePath.isPresent()`
public void testWithRealData() throws IOException { String path = "./src/test/resources/zookeeper_dump.json"; tester.cleanRepository(); tester.restoreNodeRepositoryFromJsonFile(Paths.get(path)); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); if (failurePath.isPresent()) { System.out.println("Worst case host loss : " + failurePath.get().hostsCausingFailure.size()); assertTrue(tester.nodeRepository.getNodes(NodeType.host).containsAll(failurePath.get().hostsCausingFailure)); } else fail(); }
if (failurePath.isPresent()) {
public void testWithRealData() throws IOException { String path = "./src/test/resources/zookeeper_dump.json"; tester.cleanRepository(); tester.restoreNodeRepositoryFromJsonFile(Paths.get(path)); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); assertTrue(tester.nodeRepository.getNodes(NodeType.host).containsAll(failurePath.get().hostsCausingFailure)); }
class CapacityCheckerTest { private CapacityCheckerTester tester; @Before public void setup() { tester = new CapacityCheckerTester(); } @Test @Test public void testOvercommittedHosts() { tester.createNodes(7, 4, 10, new NodeResources(-1, 10, 100), 10, 0, new NodeResources(1, 10, 100), 10); int overcommittedHosts = tester.capacityChecker.findOvercommittedHosts().size(); assertEquals(tester.nodeRepository.getNodes(NodeType.host).size(), overcommittedHosts); } @Test public void testEdgeCaseFailurePaths() { tester.createNodes(1, 1, 0, new NodeResources(1, 10, 100), 10, 0, new NodeResources(1, 10, 100), 10); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertFalse("Computing worst case host loss with no hosts should return an empty optional.", failurePath.isPresent()); tester.createNodes(1, 10, 10, new NodeResources(10, 1000, 10000), 100, 1, new NodeResources(10, 1000, 10000), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); assertTrue("Computing worst case host loss if all hosts have to be removed should result in an non-empty failureReason with empty nodes.", failurePath.get().failureReason.tenant.isEmpty() && failurePath.get().failureReason.host.isEmpty()); assertEquals(tester.nodeRepository.getNodes(NodeType.host).size(), failurePath.get().hostsCausingFailure.size()); tester.createNodes(3, 30, 10, new NodeResources(0, 0, 10000), 1000, 0, new NodeResources(0, 0, 0), 0); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("When there are multiple lacking resources, all failures are multipleReasonFailures", failureReasons.size(), failureReasons.multipleReasonFailures().size()); assertEquals(0, failureReasons.singularReasonFailures().size()); } else fail(); } @Test public 
void testIpFailurePaths() { tester.createNodes(1, 10, 10, new NodeResources(10, 1000, 10000), 1, 10, new NodeResources(10, 1000, 10000), 1); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("All failures should be due to hosts having a lack of available ip addresses.", failureReasons.singularReasonFailures().insufficientAvailableIps(), failureReasons.size()); } else fail(); } @Test public void testNodeResourceFailurePaths() { tester.createNodes(1, 10, 10, new NodeResources(1, 100, 1000), 100, 10, new NodeResources(0, 100, 1000), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("All failures should be due to hosts lacking cpu cores.", failureReasons.singularReasonFailures().insufficientVcpu(), failureReasons.size()); } else fail(); tester.createNodes(1, 10, 10, new NodeResources(10, 1, 1000), 100, 10, new NodeResources(10, 0, 1000), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("All failures should be due to hosts lacking memory.", failureReasons.singularReasonFailures().insufficientMemoryGb(), failureReasons.size()); } else fail(); tester.createNodes(1, 10, 10, new NodeResources(10, 100, 10), 100, 10, new NodeResources(10, 100, 0), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = 
failurePath.get().failureReason.failureReasons; assertEquals("All failures should be due to hosts lacking disk space.", failureReasons.singularReasonFailures().insufficientDiskGb(), failureReasons.size()); } else fail(); int emptyHostsWithSlowDisk = 10; tester.createNodes(1, 10, List.of(new NodeResources(1, 10, 100)), 10, new NodeResources(0, 0, 0), 100, 10, new NodeResources(10, 1000, 10000, NodeResources.DiskSpeed.slow), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("All empty hosts should be invalid due to having incompatible disk speed.", failureReasons.singularReasonFailures().incompatibleDiskSpeed(), emptyHostsWithSlowDisk); } else fail(); } @Test public void testParentHostPolicyIntegrityFailurePaths() { tester.createNodes(1, 1, 10, new NodeResources(1, 100, 1000), 100, 10, new NodeResources(10, 1000, 10000), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("With only one type of tenant, all failures should be due to violation of the parent host policy.", failureReasons.singularReasonFailures().violatesParentHostPolicy(), failureReasons.size()); } else fail(); tester.createNodes(1, 2, 10, new NodeResources(10, 100, 1000), 1, 0, new NodeResources(0, 0, 0), 0); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertNotEquals("Fewer distinct children than hosts should result in some parent host policy violations.", failureReasons.size(), 
failureReasons.singularReasonFailures().violatesParentHostPolicy()); assertNotEquals(0, failureReasons.singularReasonFailures().violatesParentHostPolicy()); } else fail(); } }
class CapacityCheckerTest { private CapacityCheckerTester tester; @Before public void setup() { tester = new CapacityCheckerTester(); } @Test @Test public void testOvercommittedHosts() { tester.createNodes(7, 4, 10, new NodeResources(-1, 10, 100), 10, 0, new NodeResources(1, 10, 100), 10); int overcommittedHosts = tester.capacityChecker.findOvercommittedHosts().size(); assertEquals(tester.nodeRepository.getNodes(NodeType.host).size(), overcommittedHosts); } @Test public void testEdgeCaseFailurePaths() { tester.createNodes(1, 1, 0, new NodeResources(1, 10, 100), 10, 0, new NodeResources(1, 10, 100), 10); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertFalse("Computing worst case host loss with no hosts should return an empty optional.", failurePath.isPresent()); tester.createNodes(1, 10, 10, new NodeResources(10, 1000, 10000), 100, 1, new NodeResources(10, 1000, 10000), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); assertTrue("Computing worst case host loss if all hosts have to be removed should result in an non-empty failureReason with empty nodes.", failurePath.get().failureReason.tenant.isEmpty() && failurePath.get().failureReason.host.isEmpty()); assertEquals(tester.nodeRepository.getNodes(NodeType.host).size(), failurePath.get().hostsCausingFailure.size()); tester.createNodes(3, 30, 10, new NodeResources(0, 0, 10000), 1000, 0, new NodeResources(0, 0, 0), 0); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("When there are multiple lacking resources, all failures are multipleReasonFailures", failureReasons.size(), failureReasons.multipleReasonFailures().size()); assertEquals(0, failureReasons.singularReasonFailures().size()); } else fail(); } @Test public 
void testIpFailurePaths() { tester.createNodes(1, 10, 10, new NodeResources(10, 1000, 10000), 1, 10, new NodeResources(10, 1000, 10000), 1); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("All failures should be due to hosts having a lack of available ip addresses.", failureReasons.singularReasonFailures().insufficientAvailableIps(), failureReasons.size()); } else fail(); } @Test public void testNodeResourceFailurePaths() { tester.createNodes(1, 10, 10, new NodeResources(1, 100, 1000), 100, 10, new NodeResources(0, 100, 1000), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("All failures should be due to hosts lacking cpu cores.", failureReasons.singularReasonFailures().insufficientVcpu(), failureReasons.size()); } else fail(); tester.createNodes(1, 10, 10, new NodeResources(10, 1, 1000), 100, 10, new NodeResources(10, 0, 1000), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("All failures should be due to hosts lacking memory.", failureReasons.singularReasonFailures().insufficientMemoryGb(), failureReasons.size()); } else fail(); tester.createNodes(1, 10, 10, new NodeResources(10, 100, 10), 100, 10, new NodeResources(10, 100, 0), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = 
failurePath.get().failureReason.allocationFailures; assertEquals("All failures should be due to hosts lacking disk space.", failureReasons.singularReasonFailures().insufficientDiskGb(), failureReasons.size()); } else fail(); int emptyHostsWithSlowDisk = 10; tester.createNodes(1, 10, List.of(new NodeResources(1, 10, 100)), 10, new NodeResources(0, 0, 0), 100, 10, new NodeResources(10, 1000, 10000, NodeResources.DiskSpeed.slow), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("All empty hosts should be invalid due to having incompatible disk speed.", failureReasons.singularReasonFailures().incompatibleDiskSpeed(), emptyHostsWithSlowDisk); } else fail(); } @Test public void testParentHostPolicyIntegrityFailurePaths() { tester.createNodes(1, 1, 10, new NodeResources(1, 100, 1000), 100, 10, new NodeResources(10, 1000, 10000), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("With only one type of tenant, all failures should be due to violation of the parent host policy.", failureReasons.singularReasonFailures().violatesParentHostPolicy(), failureReasons.size()); } else fail(); tester.createNodes(1, 2, 10, new NodeResources(10, 100, 1000), 1, 0, new NodeResources(0, 0, 0), 0); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertNotEquals("Fewer distinct children than hosts should result in some parent host policy violations.", failureReasons.size(), 
failureReasons.singularReasonFailures().violatesParentHostPolicy()); assertNotEquals(0, failureReasons.singularReasonFailures().violatesParentHostPolicy()); } else fail(); } }
No need to print. If you're interested in output in case of failure, include the string in the assertTrue below.
public void testWithRealData() throws IOException { String path = "./src/test/resources/zookeeper_dump.json"; tester.cleanRepository(); tester.restoreNodeRepositoryFromJsonFile(Paths.get(path)); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); if (failurePath.isPresent()) { System.out.println("Worst case host loss : " + failurePath.get().hostsCausingFailure.size()); assertTrue(tester.nodeRepository.getNodes(NodeType.host).containsAll(failurePath.get().hostsCausingFailure)); } else fail(); }
System.out.println("Worst case host loss : " + failurePath.get().hostsCausingFailure.size());
public void testWithRealData() throws IOException { String path = "./src/test/resources/zookeeper_dump.json"; tester.cleanRepository(); tester.restoreNodeRepositoryFromJsonFile(Paths.get(path)); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); assertTrue(tester.nodeRepository.getNodes(NodeType.host).containsAll(failurePath.get().hostsCausingFailure)); }
class CapacityCheckerTest { private CapacityCheckerTester tester; @Before public void setup() { tester = new CapacityCheckerTester(); } @Test @Test public void testOvercommittedHosts() { tester.createNodes(7, 4, 10, new NodeResources(-1, 10, 100), 10, 0, new NodeResources(1, 10, 100), 10); int overcommittedHosts = tester.capacityChecker.findOvercommittedHosts().size(); assertEquals(tester.nodeRepository.getNodes(NodeType.host).size(), overcommittedHosts); } @Test public void testEdgeCaseFailurePaths() { tester.createNodes(1, 1, 0, new NodeResources(1, 10, 100), 10, 0, new NodeResources(1, 10, 100), 10); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertFalse("Computing worst case host loss with no hosts should return an empty optional.", failurePath.isPresent()); tester.createNodes(1, 10, 10, new NodeResources(10, 1000, 10000), 100, 1, new NodeResources(10, 1000, 10000), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); assertTrue("Computing worst case host loss if all hosts have to be removed should result in an non-empty failureReason with empty nodes.", failurePath.get().failureReason.tenant.isEmpty() && failurePath.get().failureReason.host.isEmpty()); assertEquals(tester.nodeRepository.getNodes(NodeType.host).size(), failurePath.get().hostsCausingFailure.size()); tester.createNodes(3, 30, 10, new NodeResources(0, 0, 10000), 1000, 0, new NodeResources(0, 0, 0), 0); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("When there are multiple lacking resources, all failures are multipleReasonFailures", failureReasons.size(), failureReasons.multipleReasonFailures().size()); assertEquals(0, failureReasons.singularReasonFailures().size()); } else fail(); } @Test public 
void testIpFailurePaths() { tester.createNodes(1, 10, 10, new NodeResources(10, 1000, 10000), 1, 10, new NodeResources(10, 1000, 10000), 1); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("All failures should be due to hosts having a lack of available ip addresses.", failureReasons.singularReasonFailures().insufficientAvailableIps(), failureReasons.size()); } else fail(); } @Test public void testNodeResourceFailurePaths() { tester.createNodes(1, 10, 10, new NodeResources(1, 100, 1000), 100, 10, new NodeResources(0, 100, 1000), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("All failures should be due to hosts lacking cpu cores.", failureReasons.singularReasonFailures().insufficientVcpu(), failureReasons.size()); } else fail(); tester.createNodes(1, 10, 10, new NodeResources(10, 1, 1000), 100, 10, new NodeResources(10, 0, 1000), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("All failures should be due to hosts lacking memory.", failureReasons.singularReasonFailures().insufficientMemoryGb(), failureReasons.size()); } else fail(); tester.createNodes(1, 10, 10, new NodeResources(10, 100, 10), 100, 10, new NodeResources(10, 100, 0), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = 
failurePath.get().failureReason.failureReasons; assertEquals("All failures should be due to hosts lacking disk space.", failureReasons.singularReasonFailures().insufficientDiskGb(), failureReasons.size()); } else fail(); int emptyHostsWithSlowDisk = 10; tester.createNodes(1, 10, List.of(new NodeResources(1, 10, 100)), 10, new NodeResources(0, 0, 0), 100, 10, new NodeResources(10, 1000, 10000, NodeResources.DiskSpeed.slow), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("All empty hosts should be invalid due to having incompatible disk speed.", failureReasons.singularReasonFailures().incompatibleDiskSpeed(), emptyHostsWithSlowDisk); } else fail(); } @Test public void testParentHostPolicyIntegrityFailurePaths() { tester.createNodes(1, 1, 10, new NodeResources(1, 100, 1000), 100, 10, new NodeResources(10, 1000, 10000), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("With only one type of tenant, all failures should be due to violation of the parent host policy.", failureReasons.singularReasonFailures().violatesParentHostPolicy(), failureReasons.size()); } else fail(); tester.createNodes(1, 2, 10, new NodeResources(10, 100, 1000), 1, 0, new NodeResources(0, 0, 0), 0); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertNotEquals("Fewer distinct children than hosts should result in some parent host policy violations.", failureReasons.size(), 
failureReasons.singularReasonFailures().violatesParentHostPolicy()); assertNotEquals(0, failureReasons.singularReasonFailures().violatesParentHostPolicy()); } else fail(); } }
class CapacityCheckerTest { private CapacityCheckerTester tester; @Before public void setup() { tester = new CapacityCheckerTester(); } @Test @Test public void testOvercommittedHosts() { tester.createNodes(7, 4, 10, new NodeResources(-1, 10, 100), 10, 0, new NodeResources(1, 10, 100), 10); int overcommittedHosts = tester.capacityChecker.findOvercommittedHosts().size(); assertEquals(tester.nodeRepository.getNodes(NodeType.host).size(), overcommittedHosts); } @Test public void testEdgeCaseFailurePaths() { tester.createNodes(1, 1, 0, new NodeResources(1, 10, 100), 10, 0, new NodeResources(1, 10, 100), 10); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertFalse("Computing worst case host loss with no hosts should return an empty optional.", failurePath.isPresent()); tester.createNodes(1, 10, 10, new NodeResources(10, 1000, 10000), 100, 1, new NodeResources(10, 1000, 10000), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); assertTrue("Computing worst case host loss if all hosts have to be removed should result in an non-empty failureReason with empty nodes.", failurePath.get().failureReason.tenant.isEmpty() && failurePath.get().failureReason.host.isEmpty()); assertEquals(tester.nodeRepository.getNodes(NodeType.host).size(), failurePath.get().hostsCausingFailure.size()); tester.createNodes(3, 30, 10, new NodeResources(0, 0, 10000), 1000, 0, new NodeResources(0, 0, 0), 0); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("When there are multiple lacking resources, all failures are multipleReasonFailures", failureReasons.size(), failureReasons.multipleReasonFailures().size()); assertEquals(0, failureReasons.singularReasonFailures().size()); } else fail(); } @Test public 
void testIpFailurePaths() { tester.createNodes(1, 10, 10, new NodeResources(10, 1000, 10000), 1, 10, new NodeResources(10, 1000, 10000), 1); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("All failures should be due to hosts having a lack of available ip addresses.", failureReasons.singularReasonFailures().insufficientAvailableIps(), failureReasons.size()); } else fail(); } @Test public void testNodeResourceFailurePaths() { tester.createNodes(1, 10, 10, new NodeResources(1, 100, 1000), 100, 10, new NodeResources(0, 100, 1000), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("All failures should be due to hosts lacking cpu cores.", failureReasons.singularReasonFailures().insufficientVcpu(), failureReasons.size()); } else fail(); tester.createNodes(1, 10, 10, new NodeResources(10, 1, 1000), 100, 10, new NodeResources(10, 0, 1000), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("All failures should be due to hosts lacking memory.", failureReasons.singularReasonFailures().insufficientMemoryGb(), failureReasons.size()); } else fail(); tester.createNodes(1, 10, 10, new NodeResources(10, 100, 10), 100, 10, new NodeResources(10, 100, 0), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = 
failurePath.get().failureReason.allocationFailures; assertEquals("All failures should be due to hosts lacking disk space.", failureReasons.singularReasonFailures().insufficientDiskGb(), failureReasons.size()); } else fail(); int emptyHostsWithSlowDisk = 10; tester.createNodes(1, 10, List.of(new NodeResources(1, 10, 100)), 10, new NodeResources(0, 0, 0), 100, 10, new NodeResources(10, 1000, 10000, NodeResources.DiskSpeed.slow), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("All empty hosts should be invalid due to having incompatible disk speed.", failureReasons.singularReasonFailures().incompatibleDiskSpeed(), emptyHostsWithSlowDisk); } else fail(); } @Test public void testParentHostPolicyIntegrityFailurePaths() { tester.createNodes(1, 1, 10, new NodeResources(1, 100, 1000), 100, 10, new NodeResources(10, 1000, 10000), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("With only one type of tenant, all failures should be due to violation of the parent host policy.", failureReasons.singularReasonFailures().violatesParentHostPolicy(), failureReasons.size()); } else fail(); tester.createNodes(1, 2, 10, new NodeResources(10, 100, 1000), 1, 0, new NodeResources(0, 0, 0), 0); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertNotEquals("Fewer distinct children than hosts should result in some parent host policy violations.", failureReasons.size(), 
failureReasons.singularReasonFailures().violatesParentHostPolicy()); assertNotEquals(0, failureReasons.singularReasonFailures().violatesParentHostPolicy()); } else fail(); } }
Refactor these nested if/else-blocks to separate functions
public HostCapacityResponse(NodeRepository nodeRepository, HttpRequest request) { super(200); capacityChecker = new CapacityChecker(nodeRepository); json = request.getBooleanProperty("json"); String hostsJson = request.getProperty("hosts"); text = new StringBuilder(); slime = new Slime(); Cursor root = slime.setObject(); if (hostsJson != null) { ObjectMapper om = new ObjectMapper(); String[] hostsArray; try { hostsArray = om.readValue(hostsJson, String[].class); } catch (Exception e) { throw new IllegalArgumentException(e.getMessage()); } List<String> hostNames = Arrays.asList(hostsArray); List<Node> hosts; try { hosts = capacityChecker.nodesFromHostnames(hostNames); } catch (IllegalArgumentException e) { throw new NotFoundException(e.getMessage()); } var failure = capacityChecker.findHostRemovalFailure(hosts); if (failure.isPresent() && failure.get().failureReason.failureReasons.size() == 0) { root.setBool("removalPossible", false); error(root, "Removing all hosts is trivially impossible."); } else { if (json) hostLossPossibleToSlime(root, failure, hosts); else hostLossPossibleToText(failure, hosts); } } else { var failurePath = capacityChecker.worstCaseHostLossLeadingToFailure(); if (failurePath.isPresent()) { if (json) zoneFailurePathToSlime(root, failurePath.get()); else zoneFailurePathToText(failurePath.get()); } else { error(root, "Node repository contained no hosts."); } } }
if (hostsJson != null) {
public HostCapacityResponse(NodeRepository nodeRepository, HttpRequest request) { super(200); capacityChecker = new CapacityChecker(nodeRepository); json = request.getBooleanProperty("json"); String hostsJson = request.getProperty("hosts"); text = new StringBuilder(); slime = new Slime(); Cursor root = slime.setObject(); if (hostsJson != null) { List<Node> hosts = parseHostList(hostsJson); hostRemovalResponse(root, hosts); } else { zoneFailureReponse(root); } }
class HostCapacityResponse extends HttpResponse { private final StringBuilder text; private final Slime slime; private final CapacityChecker capacityChecker; private final boolean json; private void error(Cursor root, String errorMessage) { if (json) root.setString("error", errorMessage); else text.append(errorMessage); } private void hostLossPossibleToText(Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) { text.append(String.format("Attempting to remove %d hosts: ", hostsToRemove.size())); CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory; if (failure.isEmpty()) { text.append("OK\n\n"); text.append(history); if (history.oldParents().size() != hostsToRemove.size()) { long emptyHostCount = hostsToRemove.size() - history.oldParents().size(); text.append(String.format("\nTrivially removed %d empty host%s.", emptyHostCount, emptyHostCount > 1 ? "s" : "")); } } else { text.append("FAILURE\n\n"); text.append(history).append("\n"); text.append(failure.get().failureReason).append("\n\n"); } } private void zoneFailurePathToText(CapacityChecker.HostFailurePath failurePath) { text.append(String.format("Found %d hosts. 
Failure upon trying to remove %d hosts:\n\n", capacityChecker.getHosts().size(), failurePath.hostsCausingFailure.size())); text.append(capacityChecker.allocationHistory).append("\n"); text.append(failurePath.failureReason); } private void hostLossPossibleToSlime(Cursor root, Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) { var hosts = root.setArray("hostsToRemove"); hostsToRemove.forEach(h -> hosts.addString(h.hostname())); CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory; if (failure.isEmpty()) { root.setBool("removalPossible", true); } else { root.setBool("removalPossible", false); } var arr = root.setArray("history"); for (var entry : history.historyEntries) { var object = arr.addObject(); object.setString("tenant", entry.tenant.hostname()); if (entry.newParent != null) { object.setString("newParent", entry.newParent.hostname()); } object.setLong("eligibleParents", entry.eligibleParents); } } public void zoneFailurePathToSlime(Cursor object, CapacityChecker.HostFailurePath failurePath) { object.setLong("totalHosts", capacityChecker.getHosts().size()); object.setLong("couldLoseHosts", failurePath.hostsCausingFailure.size()); failurePath.failureReason.host.ifPresent(host -> object.setString("failedTenantParent", host.hostname()) ); failurePath.failureReason.tenant.ifPresent(tenant -> { object.setString("failedTenant", tenant.hostname()); object.setString("failedTenantResources", tenant.flavor().resources().toString()); tenant.allocation().ifPresent(allocation -> object.setString("failedTenantAllocation", allocation.toString()) ); var explanation = object.setObject("hostCandidateRejectionReasons"); allocationFailureReasonListToSlime(explanation.setObject("singularReasonFailures"), failurePath.failureReason.failureReasons.singularReasonFailures()); allocationFailureReasonListToSlime(explanation.setObject("totalFailures"), failurePath.failureReason.failureReasons); }); var details = 
object.setObject("details"); hostLossPossibleToSlime(details, Optional.of(failurePath), failurePath.hostsCausingFailure); } private void allocationFailureReasonListToSlime(Cursor root, CapacityChecker.AllocationFailureReasonList allocationFailureReasonList) { root.setLong("insufficientVcpu", allocationFailureReasonList.insufficientVcpu()); root.setLong("insufficientMemoryGb", allocationFailureReasonList.insufficientMemoryGb()); root.setLong("insufficientDiskGb", allocationFailureReasonList.insufficientDiskGb()); root.setLong("incompatibleDiskSpeed", allocationFailureReasonList.incompatibleDiskSpeed()); root.setLong("insufficientAvailableIps", allocationFailureReasonList.insufficientAvailableIps()); root.setLong("violatesParentHostPolicy", allocationFailureReasonList.violatesParentHostPolicy()); } @Override public void render(OutputStream stream) throws IOException { if (json) new JsonFormat(true).encode(stream, slime); else stream.write(text.toString().getBytes()); } @Override public String getContentType() { return json ? "application/json" : "text/plain"; } }
class HostCapacityResponse extends HttpResponse { private final StringBuilder text; private final Slime slime; private final CapacityChecker capacityChecker; private final boolean json; private List<Node> parseHostList(String hosts) { ObjectMapper om = new ObjectMapper(); String[] hostsArray; try { hostsArray = om.readValue(hosts, String[].class); } catch (Exception e) { throw new IllegalArgumentException(e.getMessage()); } List<String> hostNames = Arrays.asList(hostsArray); try { return capacityChecker.nodesFromHostnames(hostNames); } catch (IllegalArgumentException e) { throw new NotFoundException(e.getMessage()); } } private void hostRemovalResponse(Cursor root, List<Node> hosts) { var failure = capacityChecker.findHostRemovalFailure(hosts); if (failure.isPresent() && failure.get().failureReason.allocationFailures.size() == 0) { root.setBool("removalPossible", false); error(root, "Removing all hosts is trivially impossible."); } else { if (json) hostLossPossibleToSlime(root, failure, hosts); else hostLossPossibleToText(failure, hosts); } } private void zoneFailureReponse(Cursor root) { var failurePath = capacityChecker.worstCaseHostLossLeadingToFailure(); if (failurePath.isPresent()) { if (json) zoneFailurePathToSlime(root, failurePath.get()); else zoneFailurePathToText(failurePath.get()); } else { error(root, "Node repository contained no hosts."); } } private void error(Cursor root, String errorMessage) { if (json) root.setString("error", errorMessage); else text.append(errorMessage); } private void hostLossPossibleToText(Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) { text.append(String.format("Attempting to remove %d hosts: ", hostsToRemove.size())); CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory; if (failure.isEmpty()) { text.append("OK\n\n"); text.append(history); if (history.oldParents().size() != hostsToRemove.size()) { long emptyHostCount = hostsToRemove.size() - history.oldParents().size(); 
text.append(String.format("\nTrivially removed %d empty host%s.", emptyHostCount, emptyHostCount > 1 ? "s" : "")); } } else { text.append("FAILURE\n\n"); text.append(history).append("\n"); text.append(failure.get().failureReason).append("\n\n"); } } private void zoneFailurePathToText(CapacityChecker.HostFailurePath failurePath) { text.append(String.format("Found %d hosts. Failure upon trying to remove %d hosts:\n\n", capacityChecker.getHosts().size(), failurePath.hostsCausingFailure.size())); text.append(capacityChecker.allocationHistory).append("\n"); text.append(failurePath.failureReason); } private void hostLossPossibleToSlime(Cursor root, Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) { var hosts = root.setArray("hostsToRemove"); hostsToRemove.forEach(h -> hosts.addString(h.hostname())); CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory; root.setBool("removalPossible", failure.isEmpty()); var arr = root.setArray("history"); for (var entry : history.historyEntries) { var object = arr.addObject(); object.setString("tenant", entry.tenant.hostname()); if (entry.newParent != null) { object.setString("newParent", entry.newParent.hostname()); } object.setLong("eligibleParents", entry.eligibleParents); } } private void zoneFailurePathToSlime(Cursor object, CapacityChecker.HostFailurePath failurePath) { object.setLong("totalHosts", capacityChecker.getHosts().size()); object.setLong("couldLoseHosts", failurePath.hostsCausingFailure.size()); failurePath.failureReason.host.ifPresent(host -> object.setString("failedTenantParent", host.hostname()) ); failurePath.failureReason.tenant.ifPresent(tenant -> { object.setString("failedTenant", tenant.hostname()); object.setString("failedTenantResources", tenant.flavor().resources().toString()); tenant.allocation().ifPresent(allocation -> object.setString("failedTenantAllocation", allocation.toString()) ); var explanation = object.setObject("hostCandidateRejectionReasons"); 
allocationFailureReasonListToSlime(explanation.setObject("singularReasonFailures"), failurePath.failureReason.allocationFailures.singularReasonFailures()); allocationFailureReasonListToSlime(explanation.setObject("totalFailures"), failurePath.failureReason.allocationFailures); }); var details = object.setObject("details"); hostLossPossibleToSlime(details, Optional.of(failurePath), failurePath.hostsCausingFailure); } private void allocationFailureReasonListToSlime(Cursor root, CapacityChecker.AllocationFailureReasonList allocationFailureReasonList) { root.setLong("insufficientVcpu", allocationFailureReasonList.insufficientVcpu()); root.setLong("insufficientMemoryGb", allocationFailureReasonList.insufficientMemoryGb()); root.setLong("insufficientDiskGb", allocationFailureReasonList.insufficientDiskGb()); root.setLong("incompatibleDiskSpeed", allocationFailureReasonList.incompatibleDiskSpeed()); root.setLong("insufficientAvailableIps", allocationFailureReasonList.insufficientAvailableIps()); root.setLong("violatesParentHostPolicy", allocationFailureReasonList.violatesParentHostPolicy()); } @Override public void render(OutputStream stream) throws IOException { if (json) new JsonFormat(true).encode(stream, slime); else stream.write(text.toString().getBytes()); } @Override public String getContentType() { return json ? "application/json" : "text/plain"; } }
Can be replaced with ``` Set<String> notFoundNodes = new HashSet<>(hostnames); notFoundNodes.removeAll(nodes); ```
public List<Node> nodesFromHostnames(List<String> hostnames) { List<Node> nodes = hostnames.stream() .filter(h -> nodeMap.containsKey(h)) .map(h -> nodeMap.get(h)) .collect(Collectors.toList()); if (nodes.size() != hostnames.size()) { List<String> notFoundNodes = hostnames.stream() .filter(h -> !nodes.stream() .map(Node::hostname).collect(Collectors.toSet()).contains(h)) .collect(Collectors.toList()); throw new IllegalArgumentException(String.format("Host(s) not found: [ %s ]", String.join(", ", notFoundNodes))); } return nodes; }
List<String> notFoundNodes = hostnames.stream()
public List<Node> nodesFromHostnames(List<String> hostnames) { List<Node> nodes = hostnames.stream() .filter(h -> nodeMap.containsKey(h)) .map(h -> nodeMap.get(h)) .collect(Collectors.toList()); if (nodes.size() != hostnames.size()) { Set<String> notFoundNodes = new HashSet<>(hostnames); notFoundNodes.removeAll(nodes.stream().map(Node::hostname).collect(Collectors.toList())); throw new IllegalArgumentException(String.format("Host(s) not found: [ %s ]", String.join(", ", notFoundNodes))); } return nodes; }
class CapacityChecker { private List<Node> hosts; Map<String, Node> nodeMap; private Map<Node, List<Node>> nodeChildren; private Map<Node, AllocationResources> availableResources; public AllocationHistory allocationHistory = null; public CapacityChecker(NodeRepository nodeRepository) { this.hosts = getHosts(nodeRepository); List<Node> tenants = getTenants(nodeRepository, hosts); nodeMap = constructHostnameToNodeMap(hosts); this.nodeChildren = constructNodeChildrenMap(tenants, hosts, nodeMap); this.availableResources = constructAvailableResourcesMap(hosts, nodeChildren); } public List<Node> getHosts() { return hosts; } public Optional<HostFailurePath> worstCaseHostLossLeadingToFailure() { Map<Node, Integer> timesNodeCanBeRemoved = computeMaximalRepeatedRemovals(hosts, nodeChildren, availableResources); return greedyHeuristicFindFailurePath(timesNodeCanBeRemoved, hosts, nodeChildren, availableResources); } protected List<Node> findOvercommittedHosts() { return findOvercommittedNodes(availableResources); } public Optional<HostFailurePath> findHostRemovalFailure(List<Node> hostsToRemove) { var removal = findHostRemovalFailure(hostsToRemove, hosts, nodeChildren, availableResources); if (removal.isEmpty()) return Optional.empty(); HostFailurePath failurePath = new HostFailurePath(); failurePath.hostsCausingFailure = hostsToRemove; failurePath.failureReason = removal.get(); return Optional.of(failurePath); } private static Node.State[] relevantNodeStates = { Node.State.active, Node.State.inactive, Node.State.dirty, Node.State.provisioned, Node.State.ready, Node.State.reserved }; private List<Node> getHosts(NodeRepository nodeRepository) { return nodeRepository.getNodes(NodeType.host, relevantNodeStates); } private List<Node> getTenants(NodeRepository nodeRepository, List<Node> hosts) { var parentNames = hosts.stream().map(Node::hostname).collect(Collectors.toSet()); return nodeRepository.getNodes(NodeType.tenant, relevantNodeStates).stream() .filter(t -> 
parentNames.contains(t.parentHostname().orElse(""))) .collect(Collectors.toList()); } private Optional<HostFailurePath> greedyHeuristicFindFailurePath(Map<Node, Integer> heuristic, List<Node> hosts, Map<Node, List<Node>> nodeChildren, Map<Node, AllocationResources> availableResources) { if (hosts.size() == 0) return Optional.empty(); List<Node> parentRemovalPriorityList = heuristic.entrySet().stream() .sorted(Comparator.comparingInt(Map.Entry::getValue)) .map(Map.Entry::getKey) .collect(Collectors.toList()); for (int i = 1; i <= parentRemovalPriorityList.size(); i++) { List<Node> hostsToRemove = parentRemovalPriorityList.subList(0, i); var hostRemovalFailure = findHostRemovalFailure(hostsToRemove, hosts, nodeChildren, availableResources); if (hostRemovalFailure.isPresent()) { HostFailurePath failurePath = new HostFailurePath(); failurePath.hostsCausingFailure = hostsToRemove; failurePath.failureReason = hostRemovalFailure.get(); return Optional.of(failurePath); } } throw new IllegalStateException("No path to failure found. 
This should be impossible!"); } private Map<String, Node> constructHostnameToNodeMap(List<Node> nodes) { return nodes.stream().collect(Collectors.toMap(Node::hostname, n -> n)); } private Map<Node, List<Node>> constructNodeChildrenMap(List<Node> tenants, List<Node> hosts, Map<String, Node> hostnameToNode) { Map<Node, List<Node>> nodeChildren = tenants.stream() .filter(n -> n.parentHostname().isPresent()) .filter(n -> hostnameToNode.containsKey(n.parentHostname().get())) .collect(Collectors.groupingBy( n -> hostnameToNode.get(n.parentHostname().orElseThrow()))); for (var host : hosts) nodeChildren.putIfAbsent(host, List.of()); return nodeChildren; } private Map<Node, AllocationResources> constructAvailableResourcesMap(List<Node> hosts, Map<Node, List<Node>> nodeChildren) { Map<Node, AllocationResources> availableResources = new HashMap<>(); for (var host : hosts) { NodeResources hostResources = host.flavor().resources(); int occupiedIps = 0; Set<String> ipPool = host.ipAddressPool().asSet(); for (var child : nodeChildren.get(host)) { hostResources = hostResources.subtract(child.flavor().resources().withDiskSpeed(NodeResources.DiskSpeed.any)); occupiedIps += child.ipAddresses().stream().filter(ipPool::contains).count(); } availableResources.put(host, new AllocationResources(hostResources, host.ipAddressPool().asSet().size() - occupiedIps)); } return availableResources; } /** * Computes a heuristic for each host, with a lower score indicating a higher perceived likelihood that removing * the host causes an unrecoverable state */ private Map<Node, Integer> computeMaximalRepeatedRemovals(List<Node> hosts, Map<Node, List<Node>> nodeChildren, Map<Node, AllocationResources> availableResources) { Map<Node, Integer> timesNodeCanBeRemoved = hosts.stream().collect(Collectors.toMap( Function.identity(), _x -> Integer.MAX_VALUE )); for (Node host : hosts) { List<Node> children = nodeChildren.get(host); if (children.size() == 0) continue; Map<Node, AllocationResources> 
resourceMap = new HashMap<>(availableResources); Map<Node, List<Allocation>> containedAllocations = collateAllocations(nodeChildren); int timesHostCanBeRemoved = 0; Optional<Node> unallocatedNode; while (timesHostCanBeRemoved < 1000) { unallocatedNode = tryAllocateNodes(nodeChildren.get(host), hosts, resourceMap, containedAllocations); if (unallocatedNode.isEmpty()) { timesHostCanBeRemoved++; } else break; } timesNodeCanBeRemoved.put(host, timesHostCanBeRemoved); } return timesNodeCanBeRemoved; } private List<Node> findOvercommittedNodes(Map<Node, AllocationResources> availableResources) { List<Node> overcommittedNodes = new ArrayList<>(); for (var entry : availableResources.entrySet()) { var resources = entry.getValue().nodeResources; if (resources.vcpu() < 0 || resources.memoryGb() < 0 || resources.diskGb() < 0) { overcommittedNodes.add(entry.getKey()); } } return overcommittedNodes; } private Map<Node, List<Allocation>> collateAllocations(Map<Node, List<Node>> nodeChildren) { return nodeChildren.entrySet().stream().collect(Collectors.toMap( Map.Entry::getKey, e -> e.getValue().stream() .map(Node::allocation).flatMap(Optional::stream) .collect(Collectors.toList()) )); } /** * Tests whether it's possible to remove the provided hosts. * Does not mutate any input variable. 
* @return Empty optional if removal is possible, information on what caused the failure otherwise */ private Optional<HostRemovalFailure> findHostRemovalFailure(List<Node> hostsToRemove, List<Node> allHosts, Map<Node, List<Node>> nodechildren, Map<Node, AllocationResources> availableResources) { var containedAllocations = collateAllocations(nodechildren); var resourceMap = new HashMap<>(availableResources); List<Node> validAllocationTargets = allHosts.stream() .filter(h -> !hostsToRemove.contains(h)) .collect(Collectors.toList()); if (validAllocationTargets.size() == 0) { return Optional.of(HostRemovalFailure.none()); } allocationHistory = new AllocationHistory(); for (var host : hostsToRemove) { Optional<Node> unallocatedNode = tryAllocateNodes(nodechildren.get(host), validAllocationTargets, resourceMap, containedAllocations, true); if (unallocatedNode.isPresent()) { AllocationFailureReasonList failures = collateAllocationFailures(unallocatedNode.get(), validAllocationTargets, resourceMap, containedAllocations); return Optional.of(HostRemovalFailure.create(host, unallocatedNode.get(), failures)); } } return Optional.empty(); } /** * Attempts to allocate the listed nodes to a new host, mutating availableResources and containedAllocations, * optionally returning the first node to fail, if one does. 
* */ private Optional<Node> tryAllocateNodes(List<Node> nodes, List<Node> hosts, Map<Node, AllocationResources> availableResources, Map<Node, List<Allocation>> containedAllocations) { return tryAllocateNodes(nodes, hosts, availableResources, containedAllocations, false); } private Optional<Node> tryAllocateNodes(List<Node> nodes, List<Node> hosts, Map<Node, AllocationResources> availableResources, Map<Node, List<Allocation>> containedAllocations, boolean withHistory) { for (var node : nodes) { var newParent = tryAllocateNode(node, hosts, availableResources, containedAllocations); if (newParent.isEmpty()) { if (withHistory) allocationHistory.addEntry(node, null, 0); return Optional.of(node); } if (withHistory) { long eligibleParents = hosts.stream().filter(h -> !violatesParentHostPolicy(node, h, containedAllocations) && availableResources.get(h).satisfies(AllocationResources.from(node.flavor().resources()))).count(); allocationHistory.addEntry(node, newParent.get(), eligibleParents + 1); } } return Optional.empty(); } /** * @return The parent to which the node was allocated, if it was successfully allocated. 
*/ private Optional<Node> tryAllocateNode(Node node, List<Node> hosts, Map<Node, AllocationResources> availableResources, Map<Node, List<Allocation>> containedAllocations) { AllocationResources requiredNodeResources = AllocationResources.from(node.flavor().resources()); for (var host : hosts) { var availableHostResources = availableResources.get(host); if (violatesParentHostPolicy(node, host, containedAllocations)) { continue; } if (availableHostResources.satisfies(requiredNodeResources)) { availableResources.put(host, availableHostResources.subtract(requiredNodeResources)); if (node.allocation().isPresent()) { containedAllocations.get(host).add(node.allocation().get()); } return Optional.of(host); } } return Optional.empty(); } private static boolean violatesParentHostPolicy(Node node, Node host, Map<Node, List<Allocation>> containedAllocations) { if (node.allocation().isEmpty()) return false; Allocation nodeAllocation = node.allocation().get(); for (var allocation : containedAllocations.get(host)) { if (allocation.membership().cluster().equalsIgnoringGroupAndVespaVersion(nodeAllocation.membership().cluster()) && allocation.owner().equals(nodeAllocation.owner())) { return true; } } return false; } private AllocationFailureReasonList collateAllocationFailures(Node node, List<Node> hosts, Map<Node, AllocationResources> availableResources, Map<Node, List<Allocation>> containedAllocations) { List<AllocationFailureReason> allocationFailureReasons = new ArrayList<>(); for (var host : hosts) { AllocationFailureReason reason = new AllocationFailureReason(host); var availableHostResources = availableResources.get(host); reason.violatesParentHostPolicy = violatesParentHostPolicy(node, host, containedAllocations); NodeResources l = availableHostResources.nodeResources; NodeResources r = node.flavor().resources(); if (l.vcpu() < r.vcpu()) { reason.insufficientVcpu = true; } if (l.memoryGb() < r.memoryGb()) { reason.insufficientMemoryGb = true; } if (l.diskGb() < r.diskGb()) { 
reason.insufficientDiskGb = true; } if (r.diskSpeed() != NodeResources.DiskSpeed.any && r.diskSpeed() != l.diskSpeed()) { reason.incompatibleDiskSpeed = true; } if (availableHostResources.availableIPs < 1) { reason.insufficientAvailableIPs = true; } allocationFailureReasons.add(reason); } return new AllocationFailureReasonList(allocationFailureReasons); } /** * Contains the list of hosts that, upon being removed, caused an unrecoverable state, * as well as the specific host and tenant which caused it. */ public static class HostFailurePath { public List<Node> hostsCausingFailure; public HostRemovalFailure failureReason; } /** * Data class used for detailing why removing the given tenant from the given host was unsuccessful. * A failure might not be caused by failing to allocate a specific tenant, in which case the fields * will be empty. */ public static class HostRemovalFailure { public Optional<Node> host; public Optional<Node> tenant; public AllocationFailureReasonList failureReasons; public static HostRemovalFailure none() { return new HostRemovalFailure( Optional.empty(), Optional.empty(), new AllocationFailureReasonList(List.of())); } public static HostRemovalFailure create(Node host, Node tenant, AllocationFailureReasonList failureReasons) { return new HostRemovalFailure( Optional.of(host), Optional.of(tenant), failureReasons); } private HostRemovalFailure(Optional<Node> host, Optional<Node> tenant, AllocationFailureReasonList failureReasons) { this.host = host; this.tenant = tenant; this.failureReasons = failureReasons; } @Override public String toString() { if (host.isEmpty() || tenant.isEmpty()) return "No removal candidates exists."; return String.format( "Failure to remove host %s" + "\n\tNo new host found for tenant %s:" + "\n\t\tSingular Reasons: %s" + "\n\t\tTotal Reasons: %s", this.host.get().hostname(), this.tenant.get().hostname(), this.failureReasons.singularReasonFailures().toString(), this.failureReasons.toString() ); } } /** * Used to describe 
the resources required for a tenant, and available to a host. */ private static class AllocationResources { NodeResources nodeResources; int availableIPs; public static AllocationResources from(NodeResources nodeResources) { return new AllocationResources(nodeResources, 1); } public AllocationResources(NodeResources nodeResources, int availableIPs) { this.nodeResources = nodeResources; this.availableIPs = availableIPs; } public boolean satisfies(AllocationResources other) { if (!this.nodeResources.satisfies(other.nodeResources)) return false; return this.availableIPs >= other.availableIPs; } public AllocationResources subtract(AllocationResources other) { return new AllocationResources(this.nodeResources.subtract(other.nodeResources), this.availableIPs - other.availableIPs); } } /** * Keeps track of the reason why a host rejected an allocation. */ private static class AllocationFailureReason { Node host; public AllocationFailureReason (Node host) { this.host = host; } public boolean insufficientVcpu = false; public boolean insufficientMemoryGb = false; public boolean insufficientDiskGb = false; public boolean incompatibleDiskSpeed = false; public boolean insufficientAvailableIPs = false; public boolean violatesParentHostPolicy = false; public int numberOfReasons() { int n = 0; if (insufficientVcpu) n++; if (insufficientMemoryGb) n++; if (insufficientDiskGb) n++; if (incompatibleDiskSpeed) n++; if (insufficientAvailableIPs) n++; if (violatesParentHostPolicy) n++; return n; } @Override public String toString() { List<String> reasons = new ArrayList<>(); if (insufficientVcpu) reasons.add("insufficientVcpu"); if (insufficientMemoryGb) reasons.add("insufficientMemoryGb"); if (insufficientDiskGb) reasons.add("insufficientDiskGb"); if (incompatibleDiskSpeed) reasons.add("incompatibleDiskSpeed"); if (insufficientAvailableIPs) reasons.add("insufficientAvailableIPs"); if (violatesParentHostPolicy) reasons.add("violatesParentHostPolicy"); return String.format("[%s]", 
String.join(", ", reasons)); } } /** * Provides convenient methods for tallying failures. */ public static class AllocationFailureReasonList { private List<AllocationFailureReason> allocationFailureReasons; public AllocationFailureReasonList(List<AllocationFailureReason> allocationFailureReasons) { this.allocationFailureReasons = allocationFailureReasons; } public long insufficientVcpu() { return allocationFailureReasons.stream().filter(r -> r.insufficientVcpu).count(); } public long insufficientMemoryGb() { return allocationFailureReasons.stream().filter(r -> r.insufficientMemoryGb).count(); } public long insufficientDiskGb() { return allocationFailureReasons.stream().filter(r -> r.insufficientDiskGb).count(); } public long incompatibleDiskSpeed() { return allocationFailureReasons.stream().filter(r -> r.incompatibleDiskSpeed).count(); } public long insufficientAvailableIps() { return allocationFailureReasons.stream().filter(r -> r.insufficientAvailableIPs).count(); } public long violatesParentHostPolicy() { return allocationFailureReasons.stream().filter(r -> r.violatesParentHostPolicy).count(); } public AllocationFailureReasonList singularReasonFailures() { return new AllocationFailureReasonList(allocationFailureReasons.stream() .filter(reason -> reason.numberOfReasons() == 1).collect(Collectors.toList())); } public AllocationFailureReasonList multipleReasonFailures() { return new AllocationFailureReasonList(allocationFailureReasons.stream() .filter(reason -> reason.numberOfReasons() > 1).collect(Collectors.toList())); } public long size() { return allocationFailureReasons.size(); } @Override public String toString() { return String.format("CPU (%3d), Memory (%3d), Disk size (%3d), Disk speed (%3d), IP (%3d), Parent-Host Policy (%3d)", insufficientVcpu(), insufficientMemoryGb(), insufficientDiskGb(), incompatibleDiskSpeed(), insufficientAvailableIps(), violatesParentHostPolicy()); } } public static class AllocationHistory { public static class Entry { public Node 
tenant; public Node newParent; public long eligibleParents; public Entry(Node tenant, Node newParent, long eligibleParents) { this.tenant = tenant; this.newParent = newParent; this.eligibleParents = eligibleParents; } @Override public String toString() { return String.format("%-20s %-65s -> %15s [%3d valid]", tenant.hostname().replaceFirst("\\..+", ""), tenant.flavor().resources(), newParent == null ? "x" : newParent.hostname().replaceFirst("\\..+", ""), this.eligibleParents ); } } public List<Entry> historyEntries; public AllocationHistory() { this.historyEntries = new ArrayList<>(); } public void addEntry(Node tenant, Node newParent, long eligibleParents) { this.historyEntries.add(new Entry(tenant, newParent, eligibleParents)); } public Set<String> oldParents() { Set<String> oldParents = new HashSet<>(); for (var entry : historyEntries) entry.tenant.parentHostname().ifPresent(oldParents::add); return oldParents; } @Override public String toString() { StringBuilder out = new StringBuilder(); String currentParent = ""; for (var entry : historyEntries) { String parentName = entry.tenant.parentHostname().orElseThrow(); if (!parentName.equals(currentParent)) { currentParent = parentName; out.append(parentName).append("\n"); } out.append(entry.toString()).append("\n"); } return out.toString(); } } }
class CapacityChecker { private List<Node> hosts; Map<String, Node> nodeMap; private Map<Node, List<Node>> nodeChildren; private Map<Node, AllocationResources> availableResources; public AllocationHistory allocationHistory = null; public CapacityChecker(NodeRepository nodeRepository) { this.hosts = getHosts(nodeRepository); List<Node> tenants = getTenants(nodeRepository, hosts); nodeMap = constructHostnameToNodeMap(hosts); this.nodeChildren = constructNodeChildrenMap(tenants, hosts, nodeMap); this.availableResources = constructAvailableResourcesMap(hosts, nodeChildren); } public List<Node> getHosts() { return hosts; } public Optional<HostFailurePath> worstCaseHostLossLeadingToFailure() { Map<Node, Integer> timesNodeCanBeRemoved = computeMaximalRepeatedRemovals(hosts, nodeChildren, availableResources); return greedyHeuristicFindFailurePath(timesNodeCanBeRemoved, hosts, nodeChildren, availableResources); } protected List<Node> findOvercommittedHosts() { return findOvercommittedNodes(availableResources); } public Optional<HostFailurePath> findHostRemovalFailure(List<Node> hostsToRemove) { var removal = findHostRemovalFailure(hostsToRemove, hosts, nodeChildren, availableResources); if (removal.isEmpty()) return Optional.empty(); HostFailurePath failurePath = new HostFailurePath(); failurePath.hostsCausingFailure = hostsToRemove; failurePath.failureReason = removal.get(); return Optional.of(failurePath); } private static Node.State[] relevantNodeStates = { Node.State.active, Node.State.inactive, Node.State.dirty, Node.State.provisioned, Node.State.ready, Node.State.reserved }; private List<Node> getHosts(NodeRepository nodeRepository) { return nodeRepository.getNodes(NodeType.host, relevantNodeStates); } private List<Node> getTenants(NodeRepository nodeRepository, List<Node> hosts) { var parentNames = hosts.stream().map(Node::hostname).collect(Collectors.toSet()); return nodeRepository.getNodes(NodeType.tenant, relevantNodeStates).stream() .filter(t -> 
parentNames.contains(t.parentHostname().orElse(""))) .collect(Collectors.toList()); } private Optional<HostFailurePath> greedyHeuristicFindFailurePath(Map<Node, Integer> heuristic, List<Node> hosts, Map<Node, List<Node>> nodeChildren, Map<Node, AllocationResources> availableResources) { if (hosts.size() == 0) return Optional.empty(); List<Node> parentRemovalPriorityList = heuristic.entrySet().stream() .sorted(Comparator.comparingInt(Map.Entry::getValue)) .map(Map.Entry::getKey) .collect(Collectors.toList()); for (int i = 1; i <= parentRemovalPriorityList.size(); i++) { List<Node> hostsToRemove = parentRemovalPriorityList.subList(0, i); var hostRemovalFailure = findHostRemovalFailure(hostsToRemove, hosts, nodeChildren, availableResources); if (hostRemovalFailure.isPresent()) { HostFailurePath failurePath = new HostFailurePath(); failurePath.hostsCausingFailure = hostsToRemove; failurePath.failureReason = hostRemovalFailure.get(); return Optional.of(failurePath); } } throw new IllegalStateException("No path to failure found. 
This should be impossible!"); } private Map<String, Node> constructHostnameToNodeMap(List<Node> nodes) { return nodes.stream().collect(Collectors.toMap(Node::hostname, n -> n)); } private Map<Node, List<Node>> constructNodeChildrenMap(List<Node> tenants, List<Node> hosts, Map<String, Node> hostnameToNode) { Map<Node, List<Node>> nodeChildren = tenants.stream() .filter(n -> n.parentHostname().isPresent()) .filter(n -> hostnameToNode.containsKey(n.parentHostname().get())) .collect(Collectors.groupingBy( n -> hostnameToNode.get(n.parentHostname().orElseThrow()))); for (var host : hosts) nodeChildren.putIfAbsent(host, List.of()); return nodeChildren; } private Map<Node, AllocationResources> constructAvailableResourcesMap(List<Node> hosts, Map<Node, List<Node>> nodeChildren) { Map<Node, AllocationResources> availableResources = new HashMap<>(); for (var host : hosts) { NodeResources hostResources = host.flavor().resources(); int occupiedIps = 0; Set<String> ipPool = host.ipAddressPool().asSet(); for (var child : nodeChildren.get(host)) { hostResources = hostResources.subtract(child.flavor().resources().withDiskSpeed(NodeResources.DiskSpeed.any)); occupiedIps += child.ipAddresses().stream().filter(ipPool::contains).count(); } availableResources.put(host, new AllocationResources(hostResources, host.ipAddressPool().asSet().size() - occupiedIps)); } return availableResources; } /** * Computes a heuristic for each host, with a lower score indicating a higher perceived likelihood that removing * the host causes an unrecoverable state */ private Map<Node, Integer> computeMaximalRepeatedRemovals(List<Node> hosts, Map<Node, List<Node>> nodeChildren, Map<Node, AllocationResources> availableResources) { Map<Node, Integer> timesNodeCanBeRemoved = hosts.stream().collect(Collectors.toMap( Function.identity(), _x -> Integer.MAX_VALUE )); for (Node host : hosts) { List<Node> children = nodeChildren.get(host); if (children.size() == 0) continue; Map<Node, AllocationResources> 
resourceMap = new HashMap<>(availableResources); Map<Node, List<Allocation>> containedAllocations = collateAllocations(nodeChildren); int timesHostCanBeRemoved = 0; Optional<Node> unallocatedNode; while (timesHostCanBeRemoved < 1000) { unallocatedNode = tryAllocateNodes(nodeChildren.get(host), hosts, resourceMap, containedAllocations); if (unallocatedNode.isEmpty()) { timesHostCanBeRemoved++; } else break; } timesNodeCanBeRemoved.put(host, timesHostCanBeRemoved); } return timesNodeCanBeRemoved; } private List<Node> findOvercommittedNodes(Map<Node, AllocationResources> availableResources) { List<Node> overcommittedNodes = new ArrayList<>(); for (var entry : availableResources.entrySet()) { var resources = entry.getValue().nodeResources; if (resources.vcpu() < 0 || resources.memoryGb() < 0 || resources.diskGb() < 0) { overcommittedNodes.add(entry.getKey()); } } return overcommittedNodes; } private Map<Node, List<Allocation>> collateAllocations(Map<Node, List<Node>> nodeChildren) { return nodeChildren.entrySet().stream().collect(Collectors.toMap( Map.Entry::getKey, e -> e.getValue().stream() .map(Node::allocation).flatMap(Optional::stream) .collect(Collectors.toList()) )); } /** * Tests whether it's possible to remove the provided hosts. * Does not mutate any input variable. 
* @return Empty optional if removal is possible, information on what caused the failure otherwise */ private Optional<HostRemovalFailure> findHostRemovalFailure(List<Node> hostsToRemove, List<Node> allHosts, Map<Node, List<Node>> nodechildren, Map<Node, AllocationResources> availableResources) { var containedAllocations = collateAllocations(nodechildren); var resourceMap = new HashMap<>(availableResources); List<Node> validAllocationTargets = allHosts.stream() .filter(h -> !hostsToRemove.contains(h)) .collect(Collectors.toList()); if (validAllocationTargets.size() == 0) { return Optional.of(HostRemovalFailure.none()); } allocationHistory = new AllocationHistory(); for (var host : hostsToRemove) { Optional<Node> unallocatedNode = tryAllocateNodes(nodechildren.get(host), validAllocationTargets, resourceMap, containedAllocations, true); if (unallocatedNode.isPresent()) { AllocationFailureReasonList failures = collateAllocationFailures(unallocatedNode.get(), validAllocationTargets, resourceMap, containedAllocations); return Optional.of(HostRemovalFailure.create(host, unallocatedNode.get(), failures)); } } return Optional.empty(); } /** * Attempts to allocate the listed nodes to a new host, mutating availableResources and containedAllocations, * optionally returning the first node to fail, if one does. 
* */ private Optional<Node> tryAllocateNodes(List<Node> nodes, List<Node> hosts, Map<Node, AllocationResources> availableResources, Map<Node, List<Allocation>> containedAllocations) { return tryAllocateNodes(nodes, hosts, availableResources, containedAllocations, false); } private Optional<Node> tryAllocateNodes(List<Node> nodes, List<Node> hosts, Map<Node, AllocationResources> availableResources, Map<Node, List<Allocation>> containedAllocations, boolean withHistory) { for (var node : nodes) { var newParent = tryAllocateNode(node, hosts, availableResources, containedAllocations); if (newParent.isEmpty()) { if (withHistory) allocationHistory.addEntry(node, null, 0); return Optional.of(node); } if (withHistory) { long eligibleParents = hosts.stream().filter(h -> !violatesParentHostPolicy(node, h, containedAllocations) && availableResources.get(h).satisfies(AllocationResources.from(node.flavor().resources()))).count(); allocationHistory.addEntry(node, newParent.get(), eligibleParents + 1); } } return Optional.empty(); } /** * @return The parent to which the node was allocated, if it was successfully allocated. 
*/ private Optional<Node> tryAllocateNode(Node node, List<Node> hosts, Map<Node, AllocationResources> availableResources, Map<Node, List<Allocation>> containedAllocations) { AllocationResources requiredNodeResources = AllocationResources.from(node.flavor().resources()); for (var host : hosts) { var availableHostResources = availableResources.get(host); if (violatesParentHostPolicy(node, host, containedAllocations)) { continue; } if (availableHostResources.satisfies(requiredNodeResources)) { availableResources.put(host, availableHostResources.subtract(requiredNodeResources)); if (node.allocation().isPresent()) { containedAllocations.get(host).add(node.allocation().get()); } return Optional.of(host); } } return Optional.empty(); } private static boolean violatesParentHostPolicy(Node node, Node host, Map<Node, List<Allocation>> containedAllocations) { if (node.allocation().isEmpty()) return false; Allocation nodeAllocation = node.allocation().get(); for (var allocation : containedAllocations.get(host)) { if (allocation.membership().cluster().equalsIgnoringGroupAndVespaVersion(nodeAllocation.membership().cluster()) && allocation.owner().equals(nodeAllocation.owner())) { return true; } } return false; } private AllocationFailureReasonList collateAllocationFailures(Node node, List<Node> hosts, Map<Node, AllocationResources> availableResources, Map<Node, List<Allocation>> containedAllocations) { List<AllocationFailureReason> allocationFailureReasons = new ArrayList<>(); for (var host : hosts) { AllocationFailureReason reason = new AllocationFailureReason(host); var availableHostResources = availableResources.get(host); reason.violatesParentHostPolicy = violatesParentHostPolicy(node, host, containedAllocations); NodeResources l = availableHostResources.nodeResources; NodeResources r = node.flavor().resources(); if (l.vcpu() < r.vcpu()) { reason.insufficientVcpu = true; } if (l.memoryGb() < r.memoryGb()) { reason.insufficientMemoryGb = true; } if (l.diskGb() < r.diskGb()) { 
reason.insufficientDiskGb = true; } if (r.diskSpeed() != NodeResources.DiskSpeed.any && r.diskSpeed() != l.diskSpeed()) { reason.incompatibleDiskSpeed = true; } if (availableHostResources.availableIPs < 1) { reason.insufficientAvailableIPs = true; } allocationFailureReasons.add(reason); } return new AllocationFailureReasonList(allocationFailureReasons); } /** * Contains the list of hosts that, upon being removed, caused an unrecoverable state, * as well as the specific host and tenant which caused it. */ public static class HostFailurePath { public List<Node> hostsCausingFailure; public HostRemovalFailure failureReason; } /** * Data class used for detailing why removing the given tenant from the given host was unsuccessful. * A failure might not be caused by failing to allocate a specific tenant, in which case the fields * will be empty. */ public static class HostRemovalFailure { public Optional<Node> host; public Optional<Node> tenant; public AllocationFailureReasonList allocationFailures; public static HostRemovalFailure none() { return new HostRemovalFailure( Optional.empty(), Optional.empty(), new AllocationFailureReasonList(List.of())); } public static HostRemovalFailure create(Node host, Node tenant, AllocationFailureReasonList failureReasons) { return new HostRemovalFailure( Optional.of(host), Optional.of(tenant), failureReasons); } private HostRemovalFailure(Optional<Node> host, Optional<Node> tenant, AllocationFailureReasonList allocationFailures) { this.host = host; this.tenant = tenant; this.allocationFailures = allocationFailures; } @Override public String toString() { if (host.isEmpty() || tenant.isEmpty()) return "No removal candidates exists."; return String.format( "Failure to remove host %s" + "\n\tNo new host found for tenant %s:" + "\n\t\tSingular Reasons: %s" + "\n\t\tTotal Reasons: %s", this.host.get().hostname(), this.tenant.get().hostname(), this.allocationFailures.singularReasonFailures().toString(), this.allocationFailures.toString() ); } 
} /** * Used to describe the resources required for a tenant, and available to a host. */ private static class AllocationResources { NodeResources nodeResources; int availableIPs; public static AllocationResources from(NodeResources nodeResources) { return new AllocationResources(nodeResources, 1); } public AllocationResources(NodeResources nodeResources, int availableIPs) { this.nodeResources = nodeResources; this.availableIPs = availableIPs; } public boolean satisfies(AllocationResources other) { if (!this.nodeResources.satisfies(other.nodeResources)) return false; return this.availableIPs >= other.availableIPs; } public AllocationResources subtract(AllocationResources other) { return new AllocationResources(this.nodeResources.subtract(other.nodeResources), this.availableIPs - other.availableIPs); } } /** * Keeps track of the reason why a host rejected an allocation. */ private static class AllocationFailureReason { Node host; public AllocationFailureReason (Node host) { this.host = host; } public boolean insufficientVcpu = false; public boolean insufficientMemoryGb = false; public boolean insufficientDiskGb = false; public boolean incompatibleDiskSpeed = false; public boolean insufficientAvailableIPs = false; public boolean violatesParentHostPolicy = false; public int numberOfReasons() { int n = 0; if (insufficientVcpu) n++; if (insufficientMemoryGb) n++; if (insufficientDiskGb) n++; if (incompatibleDiskSpeed) n++; if (insufficientAvailableIPs) n++; if (violatesParentHostPolicy) n++; return n; } @Override public String toString() { List<String> reasons = new ArrayList<>(); if (insufficientVcpu) reasons.add("insufficientVcpu"); if (insufficientMemoryGb) reasons.add("insufficientMemoryGb"); if (insufficientDiskGb) reasons.add("insufficientDiskGb"); if (incompatibleDiskSpeed) reasons.add("incompatibleDiskSpeed"); if (insufficientAvailableIPs) reasons.add("insufficientAvailableIPs"); if (violatesParentHostPolicy) reasons.add("violatesParentHostPolicy"); return 
String.format("[%s]", String.join(", ", reasons)); } } /** * Provides convenient methods for tallying failures. */ public static class AllocationFailureReasonList { private List<AllocationFailureReason> allocationFailureReasons; public AllocationFailureReasonList(List<AllocationFailureReason> allocationFailureReasons) { this.allocationFailureReasons = allocationFailureReasons; } public long insufficientVcpu() { return allocationFailureReasons.stream().filter(r -> r.insufficientVcpu).count(); } public long insufficientMemoryGb() { return allocationFailureReasons.stream().filter(r -> r.insufficientMemoryGb).count(); } public long insufficientDiskGb() { return allocationFailureReasons.stream().filter(r -> r.insufficientDiskGb).count(); } public long incompatibleDiskSpeed() { return allocationFailureReasons.stream().filter(r -> r.incompatibleDiskSpeed).count(); } public long insufficientAvailableIps() { return allocationFailureReasons.stream().filter(r -> r.insufficientAvailableIPs).count(); } public long violatesParentHostPolicy() { return allocationFailureReasons.stream().filter(r -> r.violatesParentHostPolicy).count(); } public AllocationFailureReasonList singularReasonFailures() { return new AllocationFailureReasonList(allocationFailureReasons.stream() .filter(reason -> reason.numberOfReasons() == 1).collect(Collectors.toList())); } public AllocationFailureReasonList multipleReasonFailures() { return new AllocationFailureReasonList(allocationFailureReasons.stream() .filter(reason -> reason.numberOfReasons() > 1).collect(Collectors.toList())); } public long size() { return allocationFailureReasons.size(); } @Override public String toString() { return String.format("CPU (%3d), Memory (%3d), Disk size (%3d), Disk speed (%3d), IP (%3d), Parent-Host Policy (%3d)", insufficientVcpu(), insufficientMemoryGb(), insufficientDiskGb(), incompatibleDiskSpeed(), insufficientAvailableIps(), violatesParentHostPolicy()); } } public static class AllocationHistory { public static 
class Entry { public Node tenant; public Node newParent; public long eligibleParents; public Entry(Node tenant, Node newParent, long eligibleParents) { this.tenant = tenant; this.newParent = newParent; this.eligibleParents = eligibleParents; } @Override public String toString() { return String.format("%-20s %-65s -> %15s [%3d valid]", tenant.hostname().replaceFirst("\\..+", ""), tenant.flavor().resources(), newParent == null ? "x" : newParent.hostname().replaceFirst("\\..+", ""), this.eligibleParents ); } } public List<Entry> historyEntries; public AllocationHistory() { this.historyEntries = new ArrayList<>(); } public void addEntry(Node tenant, Node newParent, long eligibleParents) { this.historyEntries.add(new Entry(tenant, newParent, eligibleParents)); } public Set<String> oldParents() { Set<String> oldParents = new HashSet<>(); for (var entry : historyEntries) entry.tenant.parentHostname().ifPresent(oldParents::add); return oldParents; } @Override public String toString() { StringBuilder out = new StringBuilder(); String currentParent = ""; for (var entry : historyEntries) { String parentName = entry.tenant.parentHostname().orElseThrow(); if (!parentName.equals(currentParent)) { currentParent = parentName; out.append(parentName).append("\n"); } out.append(entry.toString()).append("\n"); } return out.toString(); } } }
Seeing this line made me realize we probably should do some renaming of the internal classes and their attribute names. `failure.get().failureReason.failureReasons` can be quite confusing.
public HostCapacityResponse(NodeRepository nodeRepository, HttpRequest request) { super(200); capacityChecker = new CapacityChecker(nodeRepository); json = request.getBooleanProperty("json"); String hostsJson = request.getProperty("hosts"); text = new StringBuilder(); slime = new Slime(); Cursor root = slime.setObject(); if (hostsJson != null) { ObjectMapper om = new ObjectMapper(); String[] hostsArray; try { hostsArray = om.readValue(hostsJson, String[].class); } catch (Exception e) { throw new IllegalArgumentException(e.getMessage()); } List<String> hostNames = Arrays.asList(hostsArray); List<Node> hosts; try { hosts = capacityChecker.nodesFromHostnames(hostNames); } catch (IllegalArgumentException e) { throw new NotFoundException(e.getMessage()); } var failure = capacityChecker.findHostRemovalFailure(hosts); if (failure.isPresent() && failure.get().failureReason.failureReasons.size() == 0) { root.setBool("removalPossible", false); error(root, "Removing all hosts is trivially impossible."); } else { if (json) hostLossPossibleToSlime(root, failure, hosts); else hostLossPossibleToText(failure, hosts); } } else { var failurePath = capacityChecker.worstCaseHostLossLeadingToFailure(); if (failurePath.isPresent()) { if (json) zoneFailurePathToSlime(root, failurePath.get()); else zoneFailurePathToText(failurePath.get()); } else { error(root, "Node repository contained no hosts."); } } }
if (failure.isPresent() && failure.get().failureReason.failureReasons.size() == 0) {
public HostCapacityResponse(NodeRepository nodeRepository, HttpRequest request) { super(200); capacityChecker = new CapacityChecker(nodeRepository); json = request.getBooleanProperty("json"); String hostsJson = request.getProperty("hosts"); text = new StringBuilder(); slime = new Slime(); Cursor root = slime.setObject(); if (hostsJson != null) { List<Node> hosts = parseHostList(hostsJson); hostRemovalResponse(root, hosts); } else { zoneFailureReponse(root); } }
class HostCapacityResponse extends HttpResponse { private final StringBuilder text; private final Slime slime; private final CapacityChecker capacityChecker; private final boolean json; private void error(Cursor root, String errorMessage) { if (json) root.setString("error", errorMessage); else text.append(errorMessage); } private void hostLossPossibleToText(Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) { text.append(String.format("Attempting to remove %d hosts: ", hostsToRemove.size())); CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory; if (failure.isEmpty()) { text.append("OK\n\n"); text.append(history); if (history.oldParents().size() != hostsToRemove.size()) { long emptyHostCount = hostsToRemove.size() - history.oldParents().size(); text.append(String.format("\nTrivially removed %d empty host%s.", emptyHostCount, emptyHostCount > 1 ? "s" : "")); } } else { text.append("FAILURE\n\n"); text.append(history).append("\n"); text.append(failure.get().failureReason).append("\n\n"); } } private void zoneFailurePathToText(CapacityChecker.HostFailurePath failurePath) { text.append(String.format("Found %d hosts. 
Failure upon trying to remove %d hosts:\n\n", capacityChecker.getHosts().size(), failurePath.hostsCausingFailure.size())); text.append(capacityChecker.allocationHistory).append("\n"); text.append(failurePath.failureReason); } private void hostLossPossibleToSlime(Cursor root, Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) { var hosts = root.setArray("hostsToRemove"); hostsToRemove.forEach(h -> hosts.addString(h.hostname())); CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory; if (failure.isEmpty()) { root.setBool("removalPossible", true); } else { root.setBool("removalPossible", false); } var arr = root.setArray("history"); for (var entry : history.historyEntries) { var object = arr.addObject(); object.setString("tenant", entry.tenant.hostname()); if (entry.newParent != null) { object.setString("newParent", entry.newParent.hostname()); } object.setLong("eligibleParents", entry.eligibleParents); } } public void zoneFailurePathToSlime(Cursor object, CapacityChecker.HostFailurePath failurePath) { object.setLong("totalHosts", capacityChecker.getHosts().size()); object.setLong("couldLoseHosts", failurePath.hostsCausingFailure.size()); failurePath.failureReason.host.ifPresent(host -> object.setString("failedTenantParent", host.hostname()) ); failurePath.failureReason.tenant.ifPresent(tenant -> { object.setString("failedTenant", tenant.hostname()); object.setString("failedTenantResources", tenant.flavor().resources().toString()); tenant.allocation().ifPresent(allocation -> object.setString("failedTenantAllocation", allocation.toString()) ); var explanation = object.setObject("hostCandidateRejectionReasons"); allocationFailureReasonListToSlime(explanation.setObject("singularReasonFailures"), failurePath.failureReason.failureReasons.singularReasonFailures()); allocationFailureReasonListToSlime(explanation.setObject("totalFailures"), failurePath.failureReason.failureReasons); }); var details = 
object.setObject("details"); hostLossPossibleToSlime(details, Optional.of(failurePath), failurePath.hostsCausingFailure); } private void allocationFailureReasonListToSlime(Cursor root, CapacityChecker.AllocationFailureReasonList allocationFailureReasonList) { root.setLong("insufficientVcpu", allocationFailureReasonList.insufficientVcpu()); root.setLong("insufficientMemoryGb", allocationFailureReasonList.insufficientMemoryGb()); root.setLong("insufficientDiskGb", allocationFailureReasonList.insufficientDiskGb()); root.setLong("incompatibleDiskSpeed", allocationFailureReasonList.incompatibleDiskSpeed()); root.setLong("insufficientAvailableIps", allocationFailureReasonList.insufficientAvailableIps()); root.setLong("violatesParentHostPolicy", allocationFailureReasonList.violatesParentHostPolicy()); } @Override public void render(OutputStream stream) throws IOException { if (json) new JsonFormat(true).encode(stream, slime); else stream.write(text.toString().getBytes()); } @Override public String getContentType() { return json ? "application/json" : "text/plain"; } }
class HostCapacityResponse extends HttpResponse { private final StringBuilder text; private final Slime slime; private final CapacityChecker capacityChecker; private final boolean json; private List<Node> parseHostList(String hosts) { ObjectMapper om = new ObjectMapper(); String[] hostsArray; try { hostsArray = om.readValue(hosts, String[].class); } catch (Exception e) { throw new IllegalArgumentException(e.getMessage()); } List<String> hostNames = Arrays.asList(hostsArray); try { return capacityChecker.nodesFromHostnames(hostNames); } catch (IllegalArgumentException e) { throw new NotFoundException(e.getMessage()); } } private void hostRemovalResponse(Cursor root, List<Node> hosts) { var failure = capacityChecker.findHostRemovalFailure(hosts); if (failure.isPresent() && failure.get().failureReason.allocationFailures.size() == 0) { root.setBool("removalPossible", false); error(root, "Removing all hosts is trivially impossible."); } else { if (json) hostLossPossibleToSlime(root, failure, hosts); else hostLossPossibleToText(failure, hosts); } } private void zoneFailureReponse(Cursor root) { var failurePath = capacityChecker.worstCaseHostLossLeadingToFailure(); if (failurePath.isPresent()) { if (json) zoneFailurePathToSlime(root, failurePath.get()); else zoneFailurePathToText(failurePath.get()); } else { error(root, "Node repository contained no hosts."); } } private void error(Cursor root, String errorMessage) { if (json) root.setString("error", errorMessage); else text.append(errorMessage); } private void hostLossPossibleToText(Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) { text.append(String.format("Attempting to remove %d hosts: ", hostsToRemove.size())); CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory; if (failure.isEmpty()) { text.append("OK\n\n"); text.append(history); if (history.oldParents().size() != hostsToRemove.size()) { long emptyHostCount = hostsToRemove.size() - history.oldParents().size(); 
text.append(String.format("\nTrivially removed %d empty host%s.", emptyHostCount, emptyHostCount > 1 ? "s" : "")); } } else { text.append("FAILURE\n\n"); text.append(history).append("\n"); text.append(failure.get().failureReason).append("\n\n"); } } private void zoneFailurePathToText(CapacityChecker.HostFailurePath failurePath) { text.append(String.format("Found %d hosts. Failure upon trying to remove %d hosts:\n\n", capacityChecker.getHosts().size(), failurePath.hostsCausingFailure.size())); text.append(capacityChecker.allocationHistory).append("\n"); text.append(failurePath.failureReason); } private void hostLossPossibleToSlime(Cursor root, Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) { var hosts = root.setArray("hostsToRemove"); hostsToRemove.forEach(h -> hosts.addString(h.hostname())); CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory; root.setBool("removalPossible", failure.isEmpty()); var arr = root.setArray("history"); for (var entry : history.historyEntries) { var object = arr.addObject(); object.setString("tenant", entry.tenant.hostname()); if (entry.newParent != null) { object.setString("newParent", entry.newParent.hostname()); } object.setLong("eligibleParents", entry.eligibleParents); } } private void zoneFailurePathToSlime(Cursor object, CapacityChecker.HostFailurePath failurePath) { object.setLong("totalHosts", capacityChecker.getHosts().size()); object.setLong("couldLoseHosts", failurePath.hostsCausingFailure.size()); failurePath.failureReason.host.ifPresent(host -> object.setString("failedTenantParent", host.hostname()) ); failurePath.failureReason.tenant.ifPresent(tenant -> { object.setString("failedTenant", tenant.hostname()); object.setString("failedTenantResources", tenant.flavor().resources().toString()); tenant.allocation().ifPresent(allocation -> object.setString("failedTenantAllocation", allocation.toString()) ); var explanation = object.setObject("hostCandidateRejectionReasons"); 
allocationFailureReasonListToSlime(explanation.setObject("singularReasonFailures"), failurePath.failureReason.allocationFailures.singularReasonFailures()); allocationFailureReasonListToSlime(explanation.setObject("totalFailures"), failurePath.failureReason.allocationFailures); }); var details = object.setObject("details"); hostLossPossibleToSlime(details, Optional.of(failurePath), failurePath.hostsCausingFailure); } private void allocationFailureReasonListToSlime(Cursor root, CapacityChecker.AllocationFailureReasonList allocationFailureReasonList) { root.setLong("insufficientVcpu", allocationFailureReasonList.insufficientVcpu()); root.setLong("insufficientMemoryGb", allocationFailureReasonList.insufficientMemoryGb()); root.setLong("insufficientDiskGb", allocationFailureReasonList.insufficientDiskGb()); root.setLong("incompatibleDiskSpeed", allocationFailureReasonList.incompatibleDiskSpeed()); root.setLong("insufficientAvailableIps", allocationFailureReasonList.insufficientAvailableIps()); root.setLong("violatesParentHostPolicy", allocationFailureReasonList.violatesParentHostPolicy()); } @Override public void render(OutputStream stream) throws IOException { if (json) new JsonFormat(true).encode(stream, slime); else stream.write(text.toString().getBytes()); } @Override public String getContentType() { return json ? "application/json" : "text/plain"; } }
abstract visitRelation from visitFromClause
public LogicalPlan visitAliasedRelation(AliasedRelationContext ctx) { return withTableAlias((LogicalPlan) visitRelation(ctx.relation()), ctx.tableAlias()); }
return withTableAlias((LogicalPlan) visitRelation(ctx.relation()), ctx.tableAlias());
public LogicalPlan visitAliasedRelation(AliasedRelationContext ctx) { return withTableAlias(visitRelation(ctx.relation()), ctx.tableAlias()); }
class LogicalPlanBuilder extends DorisParserBaseVisitor<Object> { protected <T> T typedVisit(ParseTree ctx) { return (T) ctx.accept(this); } /** * Override the default behavior for all visit methods. This will only return a non-null result * when the context has only one child. This is done because there is no generic method to * combine the results of the context children. In all other cases null is returned. */ @Override public Object visitChildren(RuleNode node) { if (node.getChildCount() == 1) { return node.getChild(0).accept(this); } else { return null; } } @Override public LogicalPlan visitSingleStatement(SingleStatementContext ctx) { return ParserUtils.withOrigin(ctx, () -> (LogicalPlan) visit(ctx.statement())); } /** * Visit multi-statements. */ @Override public List<LogicalPlan> visitMultiStatements(MultiStatementsContext ctx) { return visit(ctx.statement(), LogicalPlan.class); } /* ******************************************************************************************** * Plan parsing * ******************************************************************************************** */ @Override public Command visitExplain(ExplainContext ctx) { LogicalPlan logicalPlan = plan(ctx.query()); ExplainLevel explainLevel = ExplainLevel.NORMAL; if (ctx.level != null) { explainLevel = ExplainLevel.valueOf(ctx.level.getText().toUpperCase(Locale.ROOT)); } return new ExplainCommand(explainLevel, logicalPlan); } @Override public LogicalPlan visitQuery(QueryContext ctx) { return ParserUtils.withOrigin(ctx, () -> { LogicalPlan query = plan(ctx.queryTerm()); LogicalPlan queryOrganization = withQueryOrganization(query, ctx.queryOrganization()); return queryOrganization; }); } @Override public LogicalPlan visitRegularQuerySpecification(RegularQuerySpecificationContext ctx) { return ParserUtils.withOrigin(ctx, () -> { LogicalPlan relation = withRelation(Optional.ofNullable(ctx.fromClause())); return withSelectQuerySpecification( ctx, relation, ctx.selectClause(), 
Optional.ofNullable(ctx.whereClause()), Optional.ofNullable(ctx.aggClause()) ); }); } /** * Create an aliased table reference. This is typically used in FROM clauses. */ @Developing private LogicalPlan withTableAlias(LogicalPlan plan, TableAliasContext ctx) { String alias = ctx.strictIdentifier().getText(); if (null != ctx.identifierList()) { throw new ParseException("Do not implemented", ctx); } return new LogicalSubQueryAlias<>(alias, plan); } @Override public LogicalPlan visitTableName(TableNameContext ctx) { List<String> tableId = visitMultipartIdentifier(ctx.multipartIdentifier()); if (null == ctx.tableAlias().strictIdentifier()) { return new UnboundRelation(tableId); } return withTableAlias(new UnboundRelation(tableId), ctx.tableAlias()); } @Override public LogicalPlan visitAliasedQuery(AliasedQueryContext ctx) { return withTableAlias(visitQuery(ctx.query()), ctx.tableAlias()); } @Override /** * Create a star (i.e. all) expression; this selects all elements (in the specified object). * Both un-targeted (global) and targeted aliases are supported. */ @Override public Expression visitStar(StarContext ctx) { return ParserUtils.withOrigin(ctx, () -> { final QualifiedNameContext qualifiedNameContext = ctx.qualifiedName(); List<String> target; if (qualifiedNameContext != null) { target = qualifiedNameContext.identifier() .stream() .map(RuleContext::getText) .collect(ImmutableList.toImmutableList()); } else { target = Collections.emptyList(); } return new UnboundStar(target); }); } /** * Create an aliased expression if an alias is specified. Both single and multi-aliases are * supported. */ @Override public Expression visitNamedExpression(NamedExpressionContext ctx) { return ParserUtils.withOrigin(ctx, () -> { Expression expression = getExpression(ctx.expression()); if (ctx.name != null) { return new Alias(expression, ctx.name.getText()); } else { return expression; } }); } /** * Create a comparison expression. This compares two expressions. 
The following comparison * operators are supported: * - Equal: '=' or '==' * - Null-safe Equal: '<=>' * - Not Equal: '<>' or '!=' * - Less than: '<' * - Less then or Equal: '<=' * - Greater than: '>' * - Greater then or Equal: '>=' */ @Override public Expression visitComparison(ComparisonContext ctx) { return ParserUtils.withOrigin(ctx, () -> { Expression left = getExpression(ctx.left); Expression right = getExpression(ctx.right); TerminalNode operator = (TerminalNode) ctx.comparisonOperator().getChild(0); switch (operator.getSymbol().getType()) { case DorisParser.EQ: return new EqualTo(left, right); case DorisParser.NEQ: return new Not(new EqualTo(left, right)); case DorisParser.LT: return new LessThan(left, right); case DorisParser.GT: return new GreaterThan(left, right); case DorisParser.LTE: return new LessThanEqual(left, right); case DorisParser.GTE: return new GreaterThanEqual(left, right); case DorisParser.NSEQ: return new NullSafeEqual(left, right); default: throw new IllegalStateException("Unsupported comparison expression: " + operator.getSymbol().getText()); } }); } /** * Create a not expression. * format: NOT Expression * for example: * not 1 * not 1=1 */ @Override public Expression visitLogicalNot(LogicalNotContext ctx) { return ParserUtils.withOrigin(ctx, () -> new Not(getExpression(ctx.booleanExpression()))); } @Override public Expression visitLogicalBinary(LogicalBinaryContext ctx) { return ParserUtils.withOrigin(ctx, () -> { Expression left = getExpression(ctx.left); Expression right = getExpression(ctx.right); switch (ctx.operator.getType()) { case DorisParser.AND: return new And(left, right); case DorisParser.OR: return new Or(left, right); default: throw new IllegalStateException("Unsupported logical binary type: " + ctx.operator.getText()); } }); } /** * Create a predicated expression. 
A predicated expression is a normal expression with a * predicate attached to it, for example: * {{{ * a + 1 IS NULL * }}} */ @Override public Expression visitPredicated(PredicatedContext ctx) { return ParserUtils.withOrigin(ctx, () -> { Expression e = getExpression(ctx.valueExpression()); return ctx.predicate() == null ? e : withPredicate(e, ctx.predicate()); }); } @Override public Expression visitArithmeticUnary(ArithmeticUnaryContext ctx) { return ParserUtils.withOrigin(ctx, () -> { Expression e = getExpression(ctx); switch (ctx.operator.getType()) { case DorisParser.PLUS: return e; case DorisParser.MINUS: default: throw new IllegalStateException("Unsupported arithmetic unary type: " + ctx.operator.getText()); } }); } @Override public Expression visitArithmeticBinary(ArithmeticBinaryContext ctx) { return ParserUtils.withOrigin(ctx, () -> { Expression left = getExpression(ctx.left); Expression right = getExpression(ctx.right); int type = ctx.operator.getType(); if (left instanceof IntervalLiteral) { if (type != DorisParser.PLUS) { throw new IllegalArgumentException("Only supported: " + Operator.ADD); } IntervalLiteral interval = (IntervalLiteral) left; return new TimestampArithmetic(Operator.ADD, right, interval.value(), interval.timeUnit(), true); } if (right instanceof IntervalLiteral) { Operator op; if (type == DorisParser.PLUS) { op = Operator.ADD; } else if (type == DorisParser.MINUS) { op = Operator.SUBTRACT; } else { throw new IllegalArgumentException("Only supported: " + Operator.ADD + " and " + Operator.SUBTRACT); } IntervalLiteral interval = (IntervalLiteral) right; return new TimestampArithmetic(op, left, interval.value(), interval.timeUnit(), false); } return ParserUtils.withOrigin(ctx, () -> { switch (type) { case DorisParser.ASTERISK: return new Multiply(left, right); case DorisParser.SLASH: return new Divide(left, right); case DorisParser.PERCENT: return new Mod(left, right); case DorisParser.PLUS: return new Add(left, right); case 
DorisParser.MINUS: return new Subtract(left, right); default: throw new IllegalStateException( "Unsupported arithmetic binary type: " + ctx.operator.getText()); } }); }); } /** * Create a value based [[CaseWhen]] expression. This has the following SQL form: * {{{ * CASE [expression] * WHEN [value] THEN [expression] * ... * ELSE [expression] * END * }}} */ @Override public Expression visitSimpleCase(DorisParser.SimpleCaseContext context) { Expression e = getExpression(context.value); List<WhenClause> whenClauses = context.whenClause().stream() .map(w -> new WhenClause(new EqualTo(e, getExpression(w.condition)), getExpression(w.result))) .collect(Collectors.toList()); if (context.elseExpression == null) { return new CaseWhen(whenClauses); } return new CaseWhen(whenClauses, getExpression(context.elseExpression)); } /** * Create a condition based [[CaseWhen]] expression. This has the following SQL syntax: * {{{ * CASE * WHEN [predicate] THEN [expression] * ... * ELSE [expression] * END * }}} * * @param context the parse tree */ @Override public Expression visitSearchedCase(DorisParser.SearchedCaseContext context) { List<WhenClause> whenClauses = context.whenClause().stream() .map(w -> new WhenClause(getExpression(w.condition), getExpression(w.result))) .collect(Collectors.toList()); if (context.elseExpression == null) { return new CaseWhen(whenClauses); } return new CaseWhen(whenClauses, getExpression(context.elseExpression)); } @Override public Expression visitCast(DorisParser.CastContext ctx) { return ParserUtils.withOrigin(ctx, () -> new Cast(getExpression(ctx.expression()), ctx.identifier().getText())); } @Override public UnboundFunction visitExtract(DorisParser.ExtractContext ctx) { return ParserUtils.withOrigin(ctx, () -> { String functionName = ctx.field.getText(); return new UnboundFunction(functionName, false, Arrays.asList(getExpression(ctx.source))); }); } @Override public UnboundFunction visitFunctionCall(DorisParser.FunctionCallContext ctx) { return 
ParserUtils.withOrigin(ctx, () -> { String functionName = ctx.identifier().getText(); boolean isDistinct = ctx.DISTINCT() != null; List<Expression> params = visit(ctx.expression(), Expression.class); return new UnboundFunction(functionName, isDistinct, params); }); } @Override public Expression visitInterval(IntervalContext ctx) { return new IntervalLiteral(getExpression(ctx.value), visitUnitIdentifier(ctx.unit)); } @Override public String visitUnitIdentifier(UnitIdentifierContext ctx) { return ctx.getText(); } @Override public Expression visitTypeConstructor(TypeConstructorContext ctx) { String value = ctx.STRING().getText(); value = value.substring(1, value.length() - 1); String type = ctx.identifier().getText().toUpperCase(); switch (type) { case "DATE": return new DateLiteral(value); case "DATETIME": return new DateTimeLiteral(value); default: throw new IllegalStateException("Unsupported data type : " + type); } } @Override public Expression visitDereference(DereferenceContext ctx) { return ParserUtils.withOrigin(ctx, () -> { Expression e = getExpression(ctx.base); if (e instanceof UnboundSlot) { UnboundSlot unboundAttribute = (UnboundSlot) e; List<String> nameParts = Lists.newArrayList(unboundAttribute.getNameParts()); nameParts.add(ctx.fieldName.getText()); return new UnboundSlot(nameParts); } else { throw new IllegalStateException("Unsupported dereference expression: " + ctx.getText()); } }); } @Override public UnboundSlot visitColumnReference(ColumnReferenceContext ctx) { return UnboundSlot.quoted(ctx.getText()); } /** * Create a NULL literal expression. 
*/ @Override public Expression visitNullLiteral(NullLiteralContext ctx) { return new NullLiteral(); } @Override public Literal visitBooleanLiteral(BooleanLiteralContext ctx) { Boolean b = Boolean.valueOf(ctx.getText()); return new BooleanLiteral(b); } @Override public Literal visitIntegerLiteral(IntegerLiteralContext ctx) { Integer l = Integer.valueOf(ctx.getText()); return new IntegerLiteral(l); } @Override public Literal visitStringLiteral(StringLiteralContext ctx) { String s = ctx.STRING().stream() .map(ParseTree::getText) .map(str -> str.substring(1, str.length() - 1)) .reduce((s1, s2) -> s1 + s2) .orElse(""); return new StringLiteral(s); } @Override public Expression visitParenthesizedExpression(ParenthesizedExpressionContext ctx) { return getExpression(ctx.expression()); } @Override public List<Expression> visitNamedExpressionSeq(NamedExpressionSeqContext namedCtx) { return visit(namedCtx.namedExpression(), Expression.class); } /** * Create OrderKey list. * * @param ctx QueryOrganizationContext * @return List of OrderKey */ @Override public List<OrderKey> visitQueryOrganization(QueryOrganizationContext ctx) { return ParserUtils.withOrigin(ctx, () -> { if (ctx.sortClause().ORDER() != null) { return visit(ctx.sortClause().sortItem(), OrderKey.class); } else { return ImmutableList.of(); } }); } @Override public LogicalPlan visitFromClause(FromClauseContext ctx) { return ParserUtils.withOrigin(ctx, () -> { LogicalPlan left = null; for (RelationContext relation : ctx.relation()) { LogicalPlan right = plan(relation.relationPrimary()); left = left == null ? 
right : new LogicalJoin(JoinType.INNER_JOIN, Optional.empty(), left, right); left = withJoinRelations(left, relation); } return left; }); } /* ******************************************************************************************** * Table Identifier parsing * ******************************************************************************************** */ @Override public List<String> visitMultipartIdentifier(MultipartIdentifierContext ctx) { return ctx.parts.stream() .map(RuleContext::getText) .collect(ImmutableList.toImmutableList()); } /** * Create a Sequence of Strings for a parenthesis enclosed alias list. */ @Override public List<String> visitIdentifierList(IdentifierListContext ctx) { return visitIdentifierSeq(ctx.identifierSeq()); } /** * Create a Sequence of Strings for an identifier list. */ @Override public List<String> visitIdentifierSeq(IdentifierSeqContext ctx) { return ctx.ident.stream() .map(RuleContext::getText) .collect(ImmutableList.toImmutableList()); } /** * get OrderKey. * * @param ctx SortItemContext * @return SortItems */ @Override public OrderKey visitSortItem(SortItemContext ctx) { return ParserUtils.withOrigin(ctx, () -> { boolean isAsc = ctx.DESC() == null; boolean isNullFirst = true; Expression expression = typedVisit(ctx.expression()); return new OrderKey(expression, isAsc, isNullFirst); }); } private <T> List<T> visit(List<? extends ParserRuleContext> contexts, Class<T> clazz) { return contexts.stream() .map(this::visit) .map(clazz::cast) .collect(ImmutableList.toImmutableList()); } private LogicalPlan plan(ParserRuleContext tree) { return (LogicalPlan) tree.accept(this); } /* ******************************************************************************************** * Expression parsing * ******************************************************************************************** */ /** * Create an expression from the given context. 
This method just passes the context on to the * visitor and only takes care of typing (We assume that the visitor returns an Expression here). */ private Expression getExpression(ParserRuleContext ctx) { return typedVisit(ctx); } private LogicalPlan withQueryOrganization(LogicalPlan children, QueryOrganizationContext ctx) { List<OrderKey> orderKeys = visitQueryOrganization(ctx); return orderKeys.isEmpty() ? children : new LogicalSort(orderKeys, children); } /** * Add a regular (SELECT) query specification to a logical plan. The query specification * is the core of the logical plan, this is where sourcing (FROM clause), projection (SELECT), * aggregation (GROUP BY ... HAVING ...) and filtering (WHERE) takes place. * * <p>Note that query hints are ignored (both by the parser and the builder). */ private LogicalPlan withSelectQuerySpecification( ParserRuleContext ctx, LogicalPlan inputRelation, SelectClauseContext selectClause, Optional<WhereClauseContext> whereClause, Optional<AggClauseContext> aggClause) { return ParserUtils.withOrigin(ctx, () -> { LogicalPlan filter = withFilter(inputRelation, whereClause); LogicalPlan aggregate = withAggregate(filter, selectClause, aggClause); LogicalPlan having = aggregate; LogicalPlan projection = withProjection(having, selectClause, aggClause); return projection; }); } private LogicalPlan withRelation(Optional<FromClauseContext> ctx) { if (ctx.isPresent()) { return visitFromClause(ctx.get()); } else { throw new IllegalStateException("Unsupported one row relation"); } } /** * Join one more [[LogicalPlan]]s to the current logical plan. 
*/ private LogicalPlan withJoinRelations(LogicalPlan input, RelationContext ctx) { LogicalPlan last = input; for (JoinRelationContext join : ctx.joinRelation()) { JoinType joinType; if (join.joinType().LEFT() != null) { joinType = JoinType.LEFT_OUTER_JOIN; } else if (join.joinType().RIGHT() != null) { joinType = JoinType.RIGHT_OUTER_JOIN; } else if (join.joinType().FULL() != null) { joinType = JoinType.FULL_OUTER_JOIN; } else if (join.joinType().SEMI() != null) { joinType = JoinType.LEFT_SEMI_JOIN; } else if (join.joinType().ANTI() != null) { joinType = JoinType.LEFT_ANTI_JOIN; } else if (join.joinType().CROSS() != null) { joinType = JoinType.CROSS_JOIN; } else { joinType = JoinType.INNER_JOIN; } JoinCriteriaContext joinCriteria = join.joinCriteria(); Expression condition; if (joinCriteria == null) { condition = null; } else { condition = getExpression(joinCriteria.booleanExpression()); } last = new LogicalJoin(joinType, Optional.ofNullable(condition), last, plan(join.relationPrimary())); } return last; } private LogicalPlan withProjection(LogicalPlan input, SelectClauseContext selectCtx, Optional<AggClauseContext> aggCtx) { return ParserUtils.withOrigin(selectCtx, () -> { if (aggCtx.isPresent()) { return input; } else { List<NamedExpression> projects = getNamedExpressions(selectCtx.namedExpressionSeq()); return new LogicalProject(projects, input); } }); } private LogicalPlan withFilter(LogicalPlan input, Optional<WhereClauseContext> whereCtx) { return input.optionalMap(whereCtx, () -> new LogicalFilter(getExpression((whereCtx.get().booleanExpression())), input) ); } private LogicalPlan withAggregate(LogicalPlan input, SelectClauseContext selectCtx, Optional<AggClauseContext> aggCtx) { return input.optionalMap(aggCtx, () -> { List<Expression> groupByExpressions = visit(aggCtx.get().groupByItem().expression(), Expression.class); List<NamedExpression> namedExpressions = getNamedExpressions(selectCtx.namedExpressionSeq()); return new 
LogicalAggregate(groupByExpressions, namedExpressions, input); }); } /** * match predicate type and generate different predicates. * * @param ctx PredicateContext * @param valueExpression valueExpression * @return Expression */ private Expression withPredicate(Expression valueExpression, PredicateContext ctx) { return ParserUtils.withOrigin(ctx, () -> { Expression outExpression; switch (ctx.kind.getType()) { case DorisParser.BETWEEN: outExpression = new Between( valueExpression, getExpression(ctx.lower), getExpression(ctx.upper) ); break; case DorisParser.LIKE: outExpression = new Like( valueExpression, getExpression(ctx.pattern) ); break; case DorisParser.REGEXP: outExpression = new Regexp( valueExpression, getExpression(ctx.pattern) ); break; case DorisParser.IN: if (ctx.query() == null) { outExpression = null; throw new IllegalStateException("Unsupported predicate type: " + ctx.kind.getText()); } else { outExpression = new InSubquery( valueExpression, new ListQuery(typedVisit(ctx.query())) ); } break; default: throw new IllegalStateException("Unsupported predicate type: " + ctx.kind.getText()); } return ctx.NOT() != null ? 
new Not(outExpression) : outExpression; }); } private List<NamedExpression> getNamedExpressions(NamedExpressionSeqContext namedCtx) { return ParserUtils.withOrigin(namedCtx, () -> { List<Expression> expressions = visit(namedCtx.namedExpression(), Expression.class); List<NamedExpression> namedExpressions = expressions.stream().map(expression -> { if (expression instanceof NamedExpression) { return (NamedExpression) expression; } else { return new UnboundAlias(expression); } }).collect(ImmutableList.toImmutableList()); return namedExpressions; }); } @Override public Expression visitSubqueryExpression(SubqueryExpressionContext subqueryExprCtx) { return ParserUtils.withOrigin(subqueryExprCtx, () -> new ScalarSubquery(typedVisit(subqueryExprCtx.query()))); } @Override public Expression visitExist(ExistContext context) { return ParserUtils.withOrigin(context, () -> new Exists(typedVisit(context.query()))); } }
class LogicalPlanBuilder extends DorisParserBaseVisitor<Object> { protected <T> T typedVisit(ParseTree ctx) { return (T) ctx.accept(this); } /** * Override the default behavior for all visit methods. This will only return a non-null result * when the context has only one child. This is done because there is no generic method to * combine the results of the context children. In all other cases null is returned. */ @Override public Object visitChildren(RuleNode node) { if (node.getChildCount() == 1) { return node.getChild(0).accept(this); } else { return null; } } @Override public LogicalPlan visitSingleStatement(SingleStatementContext ctx) { return ParserUtils.withOrigin(ctx, () -> (LogicalPlan) visit(ctx.statement())); } /** * Visit multi-statements. */ @Override public List<LogicalPlan> visitMultiStatements(MultiStatementsContext ctx) { return visit(ctx.statement(), LogicalPlan.class); } /* ******************************************************************************************** * Plan parsing * ******************************************************************************************** */ @Override public Command visitExplain(ExplainContext ctx) { LogicalPlan logicalPlan = plan(ctx.query()); ExplainLevel explainLevel = ExplainLevel.NORMAL; if (ctx.level != null) { explainLevel = ExplainLevel.valueOf(ctx.level.getText().toUpperCase(Locale.ROOT)); } return new ExplainCommand(explainLevel, logicalPlan); } @Override public LogicalPlan visitQuery(QueryContext ctx) { return ParserUtils.withOrigin(ctx, () -> { LogicalPlan query = plan(ctx.queryTerm()); return withQueryOrganization(query, ctx.queryOrganization()); }); } @Override public LogicalPlan visitRegularQuerySpecification(RegularQuerySpecificationContext ctx) { return ParserUtils.withOrigin(ctx, () -> { LogicalPlan relation = withRelation(Optional.ofNullable(ctx.fromClause())); return withSelectQuerySpecification( ctx, relation, ctx.selectClause(), Optional.ofNullable(ctx.whereClause()), 
Optional.ofNullable(ctx.aggClause()) ); }); } /** * Create an aliased table reference. This is typically used in FROM clauses. */ @Developing private LogicalPlan withTableAlias(LogicalPlan plan, TableAliasContext ctx) { String alias = ctx.strictIdentifier().getText(); if (null != ctx.identifierList()) { throw new ParseException("Do not implemented", ctx); } return new LogicalSubQueryAlias<>(alias, plan); } @Override public LogicalPlan visitTableName(TableNameContext ctx) { List<String> tableId = visitMultipartIdentifier(ctx.multipartIdentifier()); if (null == ctx.tableAlias().strictIdentifier()) { return new UnboundRelation(tableId); } return withTableAlias(new UnboundRelation(tableId), ctx.tableAlias()); } @Override public LogicalPlan visitAliasedQuery(AliasedQueryContext ctx) { return withTableAlias(visitQuery(ctx.query()), ctx.tableAlias()); } @Override /** * Create a star (i.e. all) expression; this selects all elements (in the specified object). * Both un-targeted (global) and targeted aliases are supported. */ @Override public Expression visitStar(StarContext ctx) { return ParserUtils.withOrigin(ctx, () -> { final QualifiedNameContext qualifiedNameContext = ctx.qualifiedName(); List<String> target; if (qualifiedNameContext != null) { target = qualifiedNameContext.identifier() .stream() .map(RuleContext::getText) .collect(ImmutableList.toImmutableList()); } else { target = Collections.emptyList(); } return new UnboundStar(target); }); } /** * Create an aliased expression if an alias is specified. Both single and multi-aliases are * supported. */ @Override public Expression visitNamedExpression(NamedExpressionContext ctx) { return ParserUtils.withOrigin(ctx, () -> { Expression expression = getExpression(ctx.expression()); if (ctx.name != null) { return new Alias(expression, ctx.name.getText()); } else { return expression; } }); } /** * Create a comparison expression. This compares two expressions. 
The following comparison * operators are supported: * - Equal: '=' or '==' * - Null-safe Equal: '<=>' * - Not Equal: '<>' or '!=' * - Less than: '<' * - Less then or Equal: '<=' * - Greater than: '>' * - Greater then or Equal: '>=' */ @Override public Expression visitComparison(ComparisonContext ctx) { return ParserUtils.withOrigin(ctx, () -> { Expression left = getExpression(ctx.left); Expression right = getExpression(ctx.right); TerminalNode operator = (TerminalNode) ctx.comparisonOperator().getChild(0); switch (operator.getSymbol().getType()) { case DorisParser.EQ: return new EqualTo(left, right); case DorisParser.NEQ: return new Not(new EqualTo(left, right)); case DorisParser.LT: return new LessThan(left, right); case DorisParser.GT: return new GreaterThan(left, right); case DorisParser.LTE: return new LessThanEqual(left, right); case DorisParser.GTE: return new GreaterThanEqual(left, right); case DorisParser.NSEQ: return new NullSafeEqual(left, right); default: throw new IllegalStateException("Unsupported comparison expression: " + operator.getSymbol().getText()); } }); } /** * Create a not expression. * format: NOT Expression * for example: * not 1 * not 1=1 */ @Override public Expression visitLogicalNot(LogicalNotContext ctx) { return ParserUtils.withOrigin(ctx, () -> new Not(getExpression(ctx.booleanExpression()))); } @Override public Expression visitLogicalBinary(LogicalBinaryContext ctx) { return ParserUtils.withOrigin(ctx, () -> { Expression left = getExpression(ctx.left); Expression right = getExpression(ctx.right); switch (ctx.operator.getType()) { case DorisParser.AND: return new And(left, right); case DorisParser.OR: return new Or(left, right); default: throw new IllegalStateException("Unsupported logical binary type: " + ctx.operator.getText()); } }); } /** * Create a predicated expression. 
A predicated expression is a normal expression with a * predicate attached to it, for example: * {{{ * a + 1 IS NULL * }}} */ @Override public Expression visitPredicated(PredicatedContext ctx) { return ParserUtils.withOrigin(ctx, () -> { Expression e = getExpression(ctx.valueExpression()); return ctx.predicate() == null ? e : withPredicate(e, ctx.predicate()); }); } @Override public Expression visitArithmeticUnary(ArithmeticUnaryContext ctx) { return ParserUtils.withOrigin(ctx, () -> { Expression e = getExpression(ctx); switch (ctx.operator.getType()) { case DorisParser.PLUS: return e; case DorisParser.MINUS: default: throw new IllegalStateException("Unsupported arithmetic unary type: " + ctx.operator.getText()); } }); } @Override public Expression visitArithmeticBinary(ArithmeticBinaryContext ctx) { return ParserUtils.withOrigin(ctx, () -> { Expression left = getExpression(ctx.left); Expression right = getExpression(ctx.right); int type = ctx.operator.getType(); if (left instanceof IntervalLiteral) { if (type != DorisParser.PLUS) { throw new IllegalArgumentException("Only supported: " + Operator.ADD); } IntervalLiteral interval = (IntervalLiteral) left; return new TimestampArithmetic(Operator.ADD, right, interval.value(), interval.timeUnit(), true); } if (right instanceof IntervalLiteral) { Operator op; if (type == DorisParser.PLUS) { op = Operator.ADD; } else if (type == DorisParser.MINUS) { op = Operator.SUBTRACT; } else { throw new IllegalArgumentException("Only supported: " + Operator.ADD + " and " + Operator.SUBTRACT); } IntervalLiteral interval = (IntervalLiteral) right; return new TimestampArithmetic(op, left, interval.value(), interval.timeUnit(), false); } return ParserUtils.withOrigin(ctx, () -> { switch (type) { case DorisParser.ASTERISK: return new Multiply(left, right); case DorisParser.SLASH: return new Divide(left, right); case DorisParser.PERCENT: return new Mod(left, right); case DorisParser.PLUS: return new Add(left, right); case 
DorisParser.MINUS: return new Subtract(left, right); default: throw new IllegalStateException( "Unsupported arithmetic binary type: " + ctx.operator.getText()); } }); }); } /** * Create a value based [[CaseWhen]] expression. This has the following SQL form: * {{{ * CASE [expression] * WHEN [value] THEN [expression] * ... * ELSE [expression] * END * }}} */ @Override public Expression visitSimpleCase(DorisParser.SimpleCaseContext context) { Expression e = getExpression(context.value); List<WhenClause> whenClauses = context.whenClause().stream() .map(w -> new WhenClause(new EqualTo(e, getExpression(w.condition)), getExpression(w.result))) .collect(Collectors.toList()); if (context.elseExpression == null) { return new CaseWhen(whenClauses); } return new CaseWhen(whenClauses, getExpression(context.elseExpression)); } /** * Create a condition based [[CaseWhen]] expression. This has the following SQL syntax: * {{{ * CASE * WHEN [predicate] THEN [expression] * ... * ELSE [expression] * END * }}} * * @param context the parse tree */ @Override public Expression visitSearchedCase(DorisParser.SearchedCaseContext context) { List<WhenClause> whenClauses = context.whenClause().stream() .map(w -> new WhenClause(getExpression(w.condition), getExpression(w.result))) .collect(Collectors.toList()); if (context.elseExpression == null) { return new CaseWhen(whenClauses); } return new CaseWhen(whenClauses, getExpression(context.elseExpression)); } @Override public Expression visitCast(DorisParser.CastContext ctx) { return ParserUtils.withOrigin(ctx, () -> new Cast(getExpression(ctx.expression()), ctx.identifier().getText())); } @Override public UnboundFunction visitExtract(DorisParser.ExtractContext ctx) { return ParserUtils.withOrigin(ctx, () -> { String functionName = ctx.field.getText(); return new UnboundFunction(functionName, false, false, Arrays.asList(getExpression(ctx.source))); }); } @Override public UnboundFunction visitFunctionCall(DorisParser.FunctionCallContext ctx) { return 
ParserUtils.withOrigin(ctx, () -> { String functionName = ctx.identifier().getText(); boolean isDistinct = ctx.DISTINCT() != null; List<Expression> params = visit(ctx.expression(), Expression.class); for (Expression expression : params) { if (expression instanceof UnboundStar && functionName.equalsIgnoreCase("count") && !isDistinct) { return new UnboundFunction(functionName, false, true, new ArrayList<>()); } } return new UnboundFunction(functionName, isDistinct, false, params); }); } @Override public Expression visitInterval(IntervalContext ctx) { return new IntervalLiteral(getExpression(ctx.value), visitUnitIdentifier(ctx.unit)); } @Override public String visitUnitIdentifier(UnitIdentifierContext ctx) { return ctx.getText(); } @Override public Expression visitTypeConstructor(TypeConstructorContext ctx) { String value = ctx.STRING().getText(); value = value.substring(1, value.length() - 1); String type = ctx.identifier().getText().toUpperCase(); switch (type) { case "DATE": return new DateLiteral(value); case "DATETIME": return new DateTimeLiteral(value); default: throw new IllegalStateException("Unsupported data type : " + type); } } @Override public Expression visitDereference(DereferenceContext ctx) { return ParserUtils.withOrigin(ctx, () -> { Expression e = getExpression(ctx.base); if (e instanceof UnboundSlot) { UnboundSlot unboundAttribute = (UnboundSlot) e; List<String> nameParts = Lists.newArrayList(unboundAttribute.getNameParts()); nameParts.add(ctx.fieldName.getText()); return new UnboundSlot(nameParts); } else { throw new IllegalStateException("Unsupported dereference expression: " + ctx.getText()); } }); } @Override public UnboundSlot visitColumnReference(ColumnReferenceContext ctx) { return UnboundSlot.quoted(ctx.getText()); } /** * Create a NULL literal expression. 
*/ @Override public Expression visitNullLiteral(NullLiteralContext ctx) { return new NullLiteral(); } @Override public Literal visitBooleanLiteral(BooleanLiteralContext ctx) { Boolean b = Boolean.valueOf(ctx.getText()); return new BooleanLiteral(b); } @Override public Literal visitIntegerLiteral(IntegerLiteralContext ctx) { Integer l = Integer.valueOf(ctx.getText()); return new IntegerLiteral(l); } @Override public Literal visitStringLiteral(StringLiteralContext ctx) { String s = ctx.STRING().stream() .map(ParseTree::getText) .map(str -> str.substring(1, str.length() - 1)) .reduce((s1, s2) -> s1 + s2) .orElse(""); return new StringLiteral(s); } @Override public Expression visitParenthesizedExpression(ParenthesizedExpressionContext ctx) { return getExpression(ctx.expression()); } @Override public List<Expression> visitNamedExpressionSeq(NamedExpressionSeqContext namedCtx) { return visit(namedCtx.namedExpression(), Expression.class); } @Override public LogicalPlan visitRelation(RelationContext ctx) { LogicalPlan right = plan(ctx.relationPrimary()); if (ctx.LATERAL() != null) { if (!(right instanceof LogicalSubQueryAlias)) { throw new IllegalStateException("lateral join right table should be sub-query"); } } return right; } @Override public LogicalPlan visitFromClause(FromClauseContext ctx) { return ParserUtils.withOrigin(ctx, () -> { LogicalPlan left = null; for (RelationContext relation : ctx.relation()) { LogicalPlan right = visitRelation(relation); left = (left == null) ? 
right : new LogicalJoin<>( JoinType.CROSS_JOIN, Optional.empty(), left, right); left = withJoinRelations(left, relation); } return left; }); } /* ******************************************************************************************** * Table Identifier parsing * ******************************************************************************************** */ @Override public List<String> visitMultipartIdentifier(MultipartIdentifierContext ctx) { return ctx.parts.stream() .map(RuleContext::getText) .collect(ImmutableList.toImmutableList()); } /** * Create a Sequence of Strings for a parenthesis enclosed alias list. */ @Override public List<String> visitIdentifierList(IdentifierListContext ctx) { return visitIdentifierSeq(ctx.identifierSeq()); } /** * Create a Sequence of Strings for an identifier list. */ @Override public List<String> visitIdentifierSeq(IdentifierSeqContext ctx) { return ctx.ident.stream() .map(RuleContext::getText) .collect(ImmutableList.toImmutableList()); } /** * get OrderKey. * * @param ctx SortItemContext * @return SortItems */ @Override public OrderKey visitSortItem(SortItemContext ctx) { return ParserUtils.withOrigin(ctx, () -> { boolean isAsc = ctx.DESC() == null; boolean isNullFirst = true; Expression expression = typedVisit(ctx.expression()); return new OrderKey(expression, isAsc, isNullFirst); }); } private <T> List<T> visit(List<? extends ParserRuleContext> contexts, Class<T> clazz) { return contexts.stream() .map(this::visit) .map(clazz::cast) .collect(ImmutableList.toImmutableList()); } private LogicalPlan plan(ParserRuleContext tree) { return (LogicalPlan) tree.accept(this); } /* ******************************************************************************************** * Expression parsing * ******************************************************************************************** */ /** * Create an expression from the given context. 
This method just passes the context on to the * visitor and only takes care of typing (We assume that the visitor returns an Expression here). */ private Expression getExpression(ParserRuleContext ctx) { return typedVisit(ctx); } private LogicalPlan withQueryOrganization(LogicalPlan inputPlan, QueryOrganizationContext ctx) { Optional<SortClauseContext> sortClauseContext = Optional.ofNullable(ctx.sortClause()); Optional<LimitClauseContext> limitClauseContext = Optional.ofNullable(ctx.limitClause()); LogicalPlan sort = withSort(inputPlan, sortClauseContext); return withLimit(sort, limitClauseContext); } private LogicalPlan withSort(LogicalPlan input, Optional<SortClauseContext> sortCtx) { return input.optionalMap(sortCtx, () -> { List<OrderKey> orderKeys = visit(sortCtx.get().sortItem(), OrderKey.class); return new LogicalSort(orderKeys, input); }); } private LogicalPlan withLimit(LogicalPlan input, Optional<LimitClauseContext> limitCtx) { return input.optionalMap(limitCtx, () -> { long limit = Long.parseLong(limitCtx.get().limit.getText()); long offset = 0; Token offsetToken = limitCtx.get().offset; if (offsetToken != null) { if (input instanceof LogicalSort) { offset = Long.parseLong(offsetToken.getText()); } else { throw new IllegalStateException("OFFSET requires an ORDER BY clause"); } } return new LogicalLimit<>(limit, offset, input); }); } /** * Add a regular (SELECT) query specification to a logical plan. The query specification * is the core of the logical plan, this is where sourcing (FROM clause), projection (SELECT), * aggregation (GROUP BY ... HAVING ...) and filtering (WHERE) takes place. * * <p>Note that query hints are ignored (both by the parser and the builder). 
*/ private LogicalPlan withSelectQuerySpecification( ParserRuleContext ctx, LogicalPlan inputRelation, SelectClauseContext selectClause, Optional<WhereClauseContext> whereClause, Optional<AggClauseContext> aggClause) { return ParserUtils.withOrigin(ctx, () -> { LogicalPlan filter = withFilter(inputRelation, whereClause); LogicalPlan aggregate = withAggregate(filter, selectClause, aggClause); LogicalPlan having = aggregate; LogicalPlan projection = withProjection(having, selectClause, aggClause); return projection; }); } private LogicalPlan withRelation(Optional<FromClauseContext> ctx) { if (ctx.isPresent()) { return visitFromClause(ctx.get()); } else { throw new IllegalStateException("Unsupported one row relation"); } } /** * Join one more [[LogicalPlan]]s to the current logical plan. */ private LogicalPlan withJoinRelations(LogicalPlan input, RelationContext ctx) { LogicalPlan last = input; for (JoinRelationContext join : ctx.joinRelation()) { JoinType joinType; if (join.joinType().CROSS() != null) { joinType = JoinType.CROSS_JOIN; } else if (join.joinType().FULL() != null) { joinType = JoinType.FULL_OUTER_JOIN; } else if (join.joinType().SEMI() != null) { if (join.joinType().LEFT() != null) { joinType = JoinType.LEFT_SEMI_JOIN; } else { joinType = JoinType.RIGHT_SEMI_JOIN; } } else if (join.joinType().ANTI() != null) { if (join.joinType().LEFT() != null) { joinType = JoinType.LEFT_ANTI_JOIN; } else { joinType = JoinType.RIGHT_ANTI_JOIN; } } else if (join.joinType().LEFT() != null) { joinType = JoinType.LEFT_OUTER_JOIN; } else if (join.joinType().RIGHT() != null) { joinType = JoinType.RIGHT_OUTER_JOIN; } else { joinType = JoinType.INNER_JOIN; } JoinCriteriaContext joinCriteria = join.joinCriteria(); Expression condition; if (joinCriteria == null) { condition = null; } else { condition = getExpression(joinCriteria.booleanExpression()); } last = new LogicalJoin<>(joinType, Optional.ofNullable(condition), last, plan(join.relationPrimary())); } return last; } private 
LogicalPlan withProjection(LogicalPlan input, SelectClauseContext selectCtx, Optional<AggClauseContext> aggCtx) { return ParserUtils.withOrigin(selectCtx, () -> { if (aggCtx.isPresent()) { return input; } else { List<NamedExpression> projects = getNamedExpressions(selectCtx.namedExpressionSeq()); return new LogicalProject<>(projects, input); } }); } private LogicalPlan withFilter(LogicalPlan input, Optional<WhereClauseContext> whereCtx) { return input.optionalMap(whereCtx, () -> new LogicalFilter<>(getExpression((whereCtx.get().booleanExpression())), input) ); } private LogicalPlan withAggregate(LogicalPlan input, SelectClauseContext selectCtx, Optional<AggClauseContext> aggCtx) { return input.optionalMap(aggCtx, () -> { List<Expression> groupByExpressions = visit(aggCtx.get().groupByItem().expression(), Expression.class); List<NamedExpression> namedExpressions = getNamedExpressions(selectCtx.namedExpressionSeq()); return new LogicalAggregate<>(groupByExpressions, namedExpressions, input); }); } /** * match predicate type and generate different predicates. 
* * @param ctx PredicateContext * @param valueExpression valueExpression * @return Expression */ private Expression withPredicate(Expression valueExpression, PredicateContext ctx) { return ParserUtils.withOrigin(ctx, () -> { Expression outExpression; switch (ctx.kind.getType()) { case DorisParser.BETWEEN: outExpression = new Between( valueExpression, getExpression(ctx.lower), getExpression(ctx.upper) ); break; case DorisParser.LIKE: outExpression = new Like( valueExpression, getExpression(ctx.pattern) ); break; case DorisParser.REGEXP: outExpression = new Regexp( valueExpression, getExpression(ctx.pattern) ); break; case DorisParser.IN: if (ctx.query() == null) { outExpression = new InPredicate( valueExpression, withInList(ctx) ); } else { outExpression = new InSubquery( valueExpression, new ListQuery(typedVisit(ctx.query())) ); } break; default: throw new IllegalStateException("Unsupported predicate type: " + ctx.kind.getText()); } return ctx.NOT() != null ? new Not(outExpression) : outExpression; }); } private List<NamedExpression> getNamedExpressions(NamedExpressionSeqContext namedCtx) { return ParserUtils.withOrigin(namedCtx, () -> { List<Expression> expressions = visit(namedCtx.namedExpression(), Expression.class); List<NamedExpression> namedExpressions = expressions.stream().map(expression -> { if (expression instanceof NamedExpression) { return (NamedExpression) expression; } else { return new UnboundAlias(expression); } }).collect(ImmutableList.toImmutableList()); return namedExpressions; }); } @Override public Expression visitSubqueryExpression(SubqueryExpressionContext subqueryExprCtx) { return ParserUtils.withOrigin(subqueryExprCtx, () -> new ScalarSubquery(typedVisit(subqueryExprCtx.query()))); } @Override public Expression visitExist(ExistContext context) { return ParserUtils.withOrigin(context, () -> new Exists(typedVisit(context.query()))); } public List<Expression> withInList(PredicateContext ctx) { List<Expression> expressions = 
ctx.expression().stream() .map(this::getExpression).collect(ImmutableList.toImmutableList()); return expressions; } }
`root.setBool("removalPossible", failure.isEmpty());`
/**
 * Writes the outcome of a simulated host removal to slime.
 *
 * @param root          cursor to write the response fields into
 * @param failure       the failure path found when simulating removal; empty if removal is possible
 * @param hostsToRemove the hosts whose removal was simulated
 */
private void hostLossPossibleToSlime(Cursor root, Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) {
    var hosts = root.setArray("hostsToRemove");
    hostsToRemove.forEach(h -> hosts.addString(h.hostname()));
    CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory;
    // Removal is possible exactly when no failure path was found — no need for an if/else.
    root.setBool("removalPossible", failure.isEmpty());
    var arr = root.setArray("history");
    for (var entry : history.historyEntries) {
        var object = arr.addObject();
        object.setString("tenant", entry.tenant.hostname());
        if (entry.newParent != null) {
            object.setString("newParent", entry.newParent.hostname());
        }
        object.setLong("eligibleParents", entry.eligibleParents);
    }
}
if (failure.isEmpty()) {
/**
 * Serializes the result of a simulated host removal into the given slime cursor:
 * the hosts that were removed, whether removal is possible, and the allocation history.
 *
 * @param root          cursor to write response fields into
 * @param failure       failure path from the removal simulation; empty means removal is possible
 * @param hostsToRemove the hosts whose removal was simulated
 */
private void hostLossPossibleToSlime(Cursor root, Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) {
    var removedHosts = root.setArray("hostsToRemove");
    for (Node host : hostsToRemove) {
        removedHosts.addString(host.hostname());
    }

    CapacityChecker.AllocationHistory allocationHistory = capacityChecker.allocationHistory;
    root.setBool("removalPossible", failure.isEmpty());

    var historyArray = root.setArray("history");
    allocationHistory.historyEntries.forEach(entry -> {
        var entryObject = historyArray.addObject();
        entryObject.setString("tenant", entry.tenant.hostname());
        if (entry.newParent != null) {
            entryObject.setString("newParent", entry.newParent.hostname());
        }
        entryObject.setLong("eligibleParents", entry.eligibleParents);
    });
}
class HostCapacityResponse extends HttpResponse { private final StringBuilder text; private final Slime slime; private final CapacityChecker capacityChecker; private final boolean json; public HostCapacityResponse(NodeRepository nodeRepository, HttpRequest request) { super(200); capacityChecker = new CapacityChecker(nodeRepository); json = request.getBooleanProperty("json"); String hostsJson = request.getProperty("hosts"); text = new StringBuilder(); slime = new Slime(); Cursor root = slime.setObject(); if (hostsJson != null) { ObjectMapper om = new ObjectMapper(); String[] hostsArray; try { hostsArray = om.readValue(hostsJson, String[].class); } catch (Exception e) { throw new IllegalArgumentException(e.getMessage()); } List<String> hostNames = Arrays.asList(hostsArray); List<Node> hosts; try { hosts = capacityChecker.nodesFromHostnames(hostNames); } catch (IllegalArgumentException e) { throw new NotFoundException(e.getMessage()); } var failure = capacityChecker.findHostRemovalFailure(hosts); if (failure.isPresent() && failure.get().failureReason.failureReasons.size() == 0) { root.setBool("removalPossible", false); error(root, "Removing all hosts is trivially impossible."); } else { if (json) hostLossPossibleToSlime(root, failure, hosts); else hostLossPossibleToText(failure, hosts); } } else { var failurePath = capacityChecker.worstCaseHostLossLeadingToFailure(); if (failurePath.isPresent()) { if (json) zoneFailurePathToSlime(root, failurePath.get()); else zoneFailurePathToText(failurePath.get()); } else { error(root, "Node repository contained no hosts."); } } } private void error(Cursor root, String errorMessage) { if (json) root.setString("error", errorMessage); else text.append(errorMessage); } private void hostLossPossibleToText(Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) { text.append(String.format("Attempting to remove %d hosts: ", hostsToRemove.size())); CapacityChecker.AllocationHistory history = 
capacityChecker.allocationHistory; if (failure.isEmpty()) { text.append("OK\n\n"); text.append(history); if (history.oldParents().size() != hostsToRemove.size()) { long emptyHostCount = hostsToRemove.size() - history.oldParents().size(); text.append(String.format("\nTrivially removed %d empty host%s.", emptyHostCount, emptyHostCount > 1 ? "s" : "")); } } else { text.append("FAILURE\n\n"); text.append(history).append("\n"); text.append(failure.get().failureReason).append("\n\n"); } } private void zoneFailurePathToText(CapacityChecker.HostFailurePath failurePath) { text.append(String.format("Found %d hosts. Failure upon trying to remove %d hosts:\n\n", capacityChecker.getHosts().size(), failurePath.hostsCausingFailure.size())); text.append(capacityChecker.allocationHistory).append("\n"); text.append(failurePath.failureReason); } public void zoneFailurePathToSlime(Cursor object, CapacityChecker.HostFailurePath failurePath) { object.setLong("totalHosts", capacityChecker.getHosts().size()); object.setLong("couldLoseHosts", failurePath.hostsCausingFailure.size()); failurePath.failureReason.host.ifPresent(host -> object.setString("failedTenantParent", host.hostname()) ); failurePath.failureReason.tenant.ifPresent(tenant -> { object.setString("failedTenant", tenant.hostname()); object.setString("failedTenantResources", tenant.flavor().resources().toString()); tenant.allocation().ifPresent(allocation -> object.setString("failedTenantAllocation", allocation.toString()) ); var explanation = object.setObject("hostCandidateRejectionReasons"); allocationFailureReasonListToSlime(explanation.setObject("singularReasonFailures"), failurePath.failureReason.failureReasons.singularReasonFailures()); allocationFailureReasonListToSlime(explanation.setObject("totalFailures"), failurePath.failureReason.failureReasons); }); var details = object.setObject("details"); hostLossPossibleToSlime(details, Optional.of(failurePath), failurePath.hostsCausingFailure); } private void 
allocationFailureReasonListToSlime(Cursor root, CapacityChecker.AllocationFailureReasonList allocationFailureReasonList) { root.setLong("insufficientVcpu", allocationFailureReasonList.insufficientVcpu()); root.setLong("insufficientMemoryGb", allocationFailureReasonList.insufficientMemoryGb()); root.setLong("insufficientDiskGb", allocationFailureReasonList.insufficientDiskGb()); root.setLong("incompatibleDiskSpeed", allocationFailureReasonList.incompatibleDiskSpeed()); root.setLong("insufficientAvailableIps", allocationFailureReasonList.insufficientAvailableIps()); root.setLong("violatesParentHostPolicy", allocationFailureReasonList.violatesParentHostPolicy()); } @Override public void render(OutputStream stream) throws IOException { if (json) new JsonFormat(true).encode(stream, slime); else stream.write(text.toString().getBytes()); } @Override public String getContentType() { return json ? "application/json" : "text/plain"; } }
class HostCapacityResponse extends HttpResponse { private final StringBuilder text; private final Slime slime; private final CapacityChecker capacityChecker; private final boolean json; public HostCapacityResponse(NodeRepository nodeRepository, HttpRequest request) { super(200); capacityChecker = new CapacityChecker(nodeRepository); json = request.getBooleanProperty("json"); String hostsJson = request.getProperty("hosts"); text = new StringBuilder(); slime = new Slime(); Cursor root = slime.setObject(); if (hostsJson != null) { List<Node> hosts = parseHostList(hostsJson); hostRemovalResponse(root, hosts); } else { zoneFailureReponse(root); } } private List<Node> parseHostList(String hosts) { ObjectMapper om = new ObjectMapper(); String[] hostsArray; try { hostsArray = om.readValue(hosts, String[].class); } catch (Exception e) { throw new IllegalArgumentException(e.getMessage()); } List<String> hostNames = Arrays.asList(hostsArray); try { return capacityChecker.nodesFromHostnames(hostNames); } catch (IllegalArgumentException e) { throw new NotFoundException(e.getMessage()); } } private void hostRemovalResponse(Cursor root, List<Node> hosts) { var failure = capacityChecker.findHostRemovalFailure(hosts); if (failure.isPresent() && failure.get().failureReason.allocationFailures.size() == 0) { root.setBool("removalPossible", false); error(root, "Removing all hosts is trivially impossible."); } else { if (json) hostLossPossibleToSlime(root, failure, hosts); else hostLossPossibleToText(failure, hosts); } } private void zoneFailureReponse(Cursor root) { var failurePath = capacityChecker.worstCaseHostLossLeadingToFailure(); if (failurePath.isPresent()) { if (json) zoneFailurePathToSlime(root, failurePath.get()); else zoneFailurePathToText(failurePath.get()); } else { error(root, "Node repository contained no hosts."); } } private void error(Cursor root, String errorMessage) { if (json) root.setString("error", errorMessage); else text.append(errorMessage); } private void 
hostLossPossibleToText(Optional<CapacityChecker.HostFailurePath> failure, List<Node> hostsToRemove) { text.append(String.format("Attempting to remove %d hosts: ", hostsToRemove.size())); CapacityChecker.AllocationHistory history = capacityChecker.allocationHistory; if (failure.isEmpty()) { text.append("OK\n\n"); text.append(history); if (history.oldParents().size() != hostsToRemove.size()) { long emptyHostCount = hostsToRemove.size() - history.oldParents().size(); text.append(String.format("\nTrivially removed %d empty host%s.", emptyHostCount, emptyHostCount > 1 ? "s" : "")); } } else { text.append("FAILURE\n\n"); text.append(history).append("\n"); text.append(failure.get().failureReason).append("\n\n"); } } private void zoneFailurePathToText(CapacityChecker.HostFailurePath failurePath) { text.append(String.format("Found %d hosts. Failure upon trying to remove %d hosts:\n\n", capacityChecker.getHosts().size(), failurePath.hostsCausingFailure.size())); text.append(capacityChecker.allocationHistory).append("\n"); text.append(failurePath.failureReason); } private void zoneFailurePathToSlime(Cursor object, CapacityChecker.HostFailurePath failurePath) { object.setLong("totalHosts", capacityChecker.getHosts().size()); object.setLong("couldLoseHosts", failurePath.hostsCausingFailure.size()); failurePath.failureReason.host.ifPresent(host -> object.setString("failedTenantParent", host.hostname()) ); failurePath.failureReason.tenant.ifPresent(tenant -> { object.setString("failedTenant", tenant.hostname()); object.setString("failedTenantResources", tenant.flavor().resources().toString()); tenant.allocation().ifPresent(allocation -> object.setString("failedTenantAllocation", allocation.toString()) ); var explanation = object.setObject("hostCandidateRejectionReasons"); allocationFailureReasonListToSlime(explanation.setObject("singularReasonFailures"), failurePath.failureReason.allocationFailures.singularReasonFailures()); 
allocationFailureReasonListToSlime(explanation.setObject("totalFailures"), failurePath.failureReason.allocationFailures); }); var details = object.setObject("details"); hostLossPossibleToSlime(details, Optional.of(failurePath), failurePath.hostsCausingFailure); } private void allocationFailureReasonListToSlime(Cursor root, CapacityChecker.AllocationFailureReasonList allocationFailureReasonList) { root.setLong("insufficientVcpu", allocationFailureReasonList.insufficientVcpu()); root.setLong("insufficientMemoryGb", allocationFailureReasonList.insufficientMemoryGb()); root.setLong("insufficientDiskGb", allocationFailureReasonList.insufficientDiskGb()); root.setLong("incompatibleDiskSpeed", allocationFailureReasonList.incompatibleDiskSpeed()); root.setLong("insufficientAvailableIps", allocationFailureReasonList.insufficientAvailableIps()); root.setLong("violatesParentHostPolicy", allocationFailureReasonList.violatesParentHostPolicy()); } @Override public void render(OutputStream stream) throws IOException { if (json) new JsonFormat(true).encode(stream, slime); else stream.write(text.toString().getBytes()); } @Override public String getContentType() { return json ? "application/json" : "text/plain"; } }
I had forgotten to delete those — they were used to prototype the response design.
/**
 * Runs the capacity checker against a real zookeeper dump: a worst-case host loss
 * must be found, and every host in the failure path must exist in the repository.
 */
public void testWithRealData() throws IOException {
    String path = "./src/test/resources/zookeeper_dump.json";

    tester.cleanRepository();
    tester.restoreNodeRepositoryFromJsonFile(Paths.get(path));
    var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure();
    // Assert presence directly instead of the if/fail() pattern; the debug println is removed.
    assertTrue(failurePath.isPresent());
    assertTrue(tester.nodeRepository.getNodes(NodeType.host).containsAll(failurePath.get().hostsCausingFailure));
}
System.out.println("Worst case host loss : " + failurePath.get().hostsCausingFailure.size());
/**
 * Restores the node repository from a real zookeeper dump and verifies that a
 * worst-case host loss exists and only references hosts known to the repository.
 */
public void testWithRealData() throws IOException {
    tester.cleanRepository();
    tester.restoreNodeRepositoryFromJsonFile(Paths.get("./src/test/resources/zookeeper_dump.json"));

    var result = tester.capacityChecker.worstCaseHostLossLeadingToFailure();
    assertTrue(result.isPresent());
    assertTrue(tester.nodeRepository.getNodes(NodeType.host)
                     .containsAll(result.get().hostsCausingFailure));
}
class CapacityCheckerTest { private CapacityCheckerTester tester; @Before public void setup() { tester = new CapacityCheckerTester(); } @Test @Test public void testOvercommittedHosts() { tester.createNodes(7, 4, 10, new NodeResources(-1, 10, 100), 10, 0, new NodeResources(1, 10, 100), 10); int overcommittedHosts = tester.capacityChecker.findOvercommittedHosts().size(); assertEquals(tester.nodeRepository.getNodes(NodeType.host).size(), overcommittedHosts); } @Test public void testEdgeCaseFailurePaths() { tester.createNodes(1, 1, 0, new NodeResources(1, 10, 100), 10, 0, new NodeResources(1, 10, 100), 10); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertFalse("Computing worst case host loss with no hosts should return an empty optional.", failurePath.isPresent()); tester.createNodes(1, 10, 10, new NodeResources(10, 1000, 10000), 100, 1, new NodeResources(10, 1000, 10000), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); assertTrue("Computing worst case host loss if all hosts have to be removed should result in an non-empty failureReason with empty nodes.", failurePath.get().failureReason.tenant.isEmpty() && failurePath.get().failureReason.host.isEmpty()); assertEquals(tester.nodeRepository.getNodes(NodeType.host).size(), failurePath.get().hostsCausingFailure.size()); tester.createNodes(3, 30, 10, new NodeResources(0, 0, 10000), 1000, 0, new NodeResources(0, 0, 0), 0); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("When there are multiple lacking resources, all failures are multipleReasonFailures", failureReasons.size(), failureReasons.multipleReasonFailures().size()); assertEquals(0, failureReasons.singularReasonFailures().size()); } else fail(); } @Test public 
void testIpFailurePaths() { tester.createNodes(1, 10, 10, new NodeResources(10, 1000, 10000), 1, 10, new NodeResources(10, 1000, 10000), 1); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("All failures should be due to hosts having a lack of available ip addresses.", failureReasons.singularReasonFailures().insufficientAvailableIps(), failureReasons.size()); } else fail(); } @Test public void testNodeResourceFailurePaths() { tester.createNodes(1, 10, 10, new NodeResources(1, 100, 1000), 100, 10, new NodeResources(0, 100, 1000), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("All failures should be due to hosts lacking cpu cores.", failureReasons.singularReasonFailures().insufficientVcpu(), failureReasons.size()); } else fail(); tester.createNodes(1, 10, 10, new NodeResources(10, 1, 1000), 100, 10, new NodeResources(10, 0, 1000), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("All failures should be due to hosts lacking memory.", failureReasons.singularReasonFailures().insufficientMemoryGb(), failureReasons.size()); } else fail(); tester.createNodes(1, 10, 10, new NodeResources(10, 100, 10), 100, 10, new NodeResources(10, 100, 0), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = 
failurePath.get().failureReason.failureReasons; assertEquals("All failures should be due to hosts lacking disk space.", failureReasons.singularReasonFailures().insufficientDiskGb(), failureReasons.size()); } else fail(); int emptyHostsWithSlowDisk = 10; tester.createNodes(1, 10, List.of(new NodeResources(1, 10, 100)), 10, new NodeResources(0, 0, 0), 100, 10, new NodeResources(10, 1000, 10000, NodeResources.DiskSpeed.slow), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("All empty hosts should be invalid due to having incompatible disk speed.", failureReasons.singularReasonFailures().incompatibleDiskSpeed(), emptyHostsWithSlowDisk); } else fail(); } @Test public void testParentHostPolicyIntegrityFailurePaths() { tester.createNodes(1, 1, 10, new NodeResources(1, 100, 1000), 100, 10, new NodeResources(10, 1000, 10000), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertEquals("With only one type of tenant, all failures should be due to violation of the parent host policy.", failureReasons.singularReasonFailures().violatesParentHostPolicy(), failureReasons.size()); } else fail(); tester.createNodes(1, 2, 10, new NodeResources(10, 100, 1000), 1, 0, new NodeResources(0, 0, 0), 0); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.failureReasons; assertNotEquals("Fewer distinct children than hosts should result in some parent host policy violations.", failureReasons.size(), 
failureReasons.singularReasonFailures().violatesParentHostPolicy()); assertNotEquals(0, failureReasons.singularReasonFailures().violatesParentHostPolicy()); } else fail(); } }
class CapacityCheckerTest { private CapacityCheckerTester tester; @Before public void setup() { tester = new CapacityCheckerTester(); } @Test @Test public void testOvercommittedHosts() { tester.createNodes(7, 4, 10, new NodeResources(-1, 10, 100), 10, 0, new NodeResources(1, 10, 100), 10); int overcommittedHosts = tester.capacityChecker.findOvercommittedHosts().size(); assertEquals(tester.nodeRepository.getNodes(NodeType.host).size(), overcommittedHosts); } @Test public void testEdgeCaseFailurePaths() { tester.createNodes(1, 1, 0, new NodeResources(1, 10, 100), 10, 0, new NodeResources(1, 10, 100), 10); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertFalse("Computing worst case host loss with no hosts should return an empty optional.", failurePath.isPresent()); tester.createNodes(1, 10, 10, new NodeResources(10, 1000, 10000), 100, 1, new NodeResources(10, 1000, 10000), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); assertTrue("Computing worst case host loss if all hosts have to be removed should result in an non-empty failureReason with empty nodes.", failurePath.get().failureReason.tenant.isEmpty() && failurePath.get().failureReason.host.isEmpty()); assertEquals(tester.nodeRepository.getNodes(NodeType.host).size(), failurePath.get().hostsCausingFailure.size()); tester.createNodes(3, 30, 10, new NodeResources(0, 0, 10000), 1000, 0, new NodeResources(0, 0, 0), 0); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("When there are multiple lacking resources, all failures are multipleReasonFailures", failureReasons.size(), failureReasons.multipleReasonFailures().size()); assertEquals(0, failureReasons.singularReasonFailures().size()); } else fail(); } @Test public 
void testIpFailurePaths() { tester.createNodes(1, 10, 10, new NodeResources(10, 1000, 10000), 1, 10, new NodeResources(10, 1000, 10000), 1); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("All failures should be due to hosts having a lack of available ip addresses.", failureReasons.singularReasonFailures().insufficientAvailableIps(), failureReasons.size()); } else fail(); } @Test public void testNodeResourceFailurePaths() { tester.createNodes(1, 10, 10, new NodeResources(1, 100, 1000), 100, 10, new NodeResources(0, 100, 1000), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("All failures should be due to hosts lacking cpu cores.", failureReasons.singularReasonFailures().insufficientVcpu(), failureReasons.size()); } else fail(); tester.createNodes(1, 10, 10, new NodeResources(10, 1, 1000), 100, 10, new NodeResources(10, 0, 1000), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("All failures should be due to hosts lacking memory.", failureReasons.singularReasonFailures().insufficientMemoryGb(), failureReasons.size()); } else fail(); tester.createNodes(1, 10, 10, new NodeResources(10, 100, 10), 100, 10, new NodeResources(10, 100, 0), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = 
failurePath.get().failureReason.allocationFailures; assertEquals("All failures should be due to hosts lacking disk space.", failureReasons.singularReasonFailures().insufficientDiskGb(), failureReasons.size()); } else fail(); int emptyHostsWithSlowDisk = 10; tester.createNodes(1, 10, List.of(new NodeResources(1, 10, 100)), 10, new NodeResources(0, 0, 0), 100, 10, new NodeResources(10, 1000, 10000, NodeResources.DiskSpeed.slow), 100); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("All empty hosts should be invalid due to having incompatible disk speed.", failureReasons.singularReasonFailures().incompatibleDiskSpeed(), emptyHostsWithSlowDisk); } else fail(); } @Test public void testParentHostPolicyIntegrityFailurePaths() { tester.createNodes(1, 1, 10, new NodeResources(1, 100, 1000), 100, 10, new NodeResources(10, 1000, 10000), 100); var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertEquals("With only one type of tenant, all failures should be due to violation of the parent host policy.", failureReasons.singularReasonFailures().violatesParentHostPolicy(), failureReasons.size()); } else fail(); tester.createNodes(1, 2, 10, new NodeResources(10, 100, 1000), 1, 0, new NodeResources(0, 0, 0), 0); failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure(); assertTrue(failurePath.isPresent()); if (failurePath.get().failureReason.tenant.isPresent()) { var failureReasons = failurePath.get().failureReason.allocationFailures; assertNotEquals("Fewer distinct children than hosts should result in some parent host policy violations.", failureReasons.size(), 
failureReasons.singularReasonFailures().violatesParentHostPolicy()); assertNotEquals(0, failureReasons.singularReasonFailures().violatesParentHostPolicy()); } else fail(); } }